]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9.1-3.5.4-201209171824.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.5.4-201209171824.patch
CommitLineData
ed532dcf
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b4a898f..cd023f2 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52@@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56+ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60@@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64+builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70+clut_vga16.c
71+common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78+config.c
79 config.mak
80 config.mak.autogen
81+config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85@@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89+dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93+exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97@@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101+gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108+hash
109+hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113@@ -145,7 +163,7 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117-kconfig
118+kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122@@ -153,7 +171,7 @@ kxgettext
123 lkc_defs.h
124 lex.c
125 lex.*.c
126-linux
127+lib1funcs.S
128 logo_*.c
129 logo_*_clut224.c
130 logo_*_mono.c
131@@ -164,14 +182,15 @@ machtypes.h
132 map
133 map_hugetlb
134 maui_boot.h
135-media
136 mconf
137+mdp
138 miboot*
139 mk_elfconfig
140 mkboot
141 mkbugboot
142 mkcpustr
143 mkdep
144+mkpiggy
145 mkprep
146 mkregtable
147 mktables
148@@ -188,6 +207,8 @@ oui.c*
149 page-types
150 parse.c
151 parse.h
152+parse-events*
153+pasyms.h
154 patches*
155 pca200e.bin
156 pca200e_ecd.bin2
157@@ -197,6 +218,7 @@ perf-archive
158 piggyback
159 piggy.gzip
160 piggy.S
161+pmu-*
162 pnmtologo
163 ppc_defs.h*
164 pss_boot.h
165@@ -206,7 +228,10 @@ r200_reg_safe.h
166 r300_reg_safe.h
167 r420_reg_safe.h
168 r600_reg_safe.h
169+realmode.lds
170+realmode.relocs
171 recordmcount
172+regdb.c
173 relocs
174 rlim_names.h
175 rn50_reg_safe.h
176@@ -216,8 +241,11 @@ series
177 setup
178 setup.bin
179 setup.elf
180+size_overflow_hash.h
181 sImage
182+slabinfo
183 sm_tbl*
184+sortextable
185 split-include
186 syscalltab.h
187 tables.c
188@@ -227,6 +255,7 @@ tftpboot.img
189 timeconst.h
190 times.h*
191 trix_boot.h
192+user_constants.h
193 utsrelease.h*
194 vdso-syms.lds
195 vdso.lds
196@@ -238,13 +267,17 @@ vdso32.lds
197 vdso32.so.dbg
198 vdso64.lds
199 vdso64.so.dbg
200+vdsox32.lds
201+vdsox32-syms.lds
202 version.h*
203 vmImage
204 vmlinux
205 vmlinux-*
206 vmlinux.aout
207 vmlinux.bin.all
208+vmlinux.bin.bz2
209 vmlinux.lds
210+vmlinux.relocs
211 vmlinuz
212 voffset.h
213 vsyscall.lds
214@@ -252,9 +285,11 @@ vsyscall_32.lds
215 wanxlfw.inc
216 uImage
217 unifdef
218+utsrelease.h
219 wakeup.bin
220 wakeup.elf
221 wakeup.lds
222 zImage*
223 zconf.hash.c
224+zconf.lex.c
225 zoffset.h
226diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
227index a92c5eb..7530459 100644
228--- a/Documentation/kernel-parameters.txt
229+++ b/Documentation/kernel-parameters.txt
230@@ -2051,6 +2051,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
231 the specified number of seconds. This is to be used if
232 your oopses keep scrolling off the screen.
233
234+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
235+ virtualization environments that don't cope well with the
236+ expand down segment used by UDEREF on X86-32 or the frequent
237+ page table updates on X86-64.
238+
239+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
240+
241 pcbit= [HW,ISDN]
242
243 pcd. [PARIDE]
244diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
245index 13d6166..8c235b6 100644
246--- a/Documentation/sysctl/fs.txt
247+++ b/Documentation/sysctl/fs.txt
248@@ -163,16 +163,22 @@ This value can be used to query and set the core dump mode for setuid
249 or otherwise protected/tainted binaries. The modes are
250
251 0 - (default) - traditional behaviour. Any process which has changed
252- privilege levels or is execute only will not be dumped
253+ privilege levels or is execute only will not be dumped.
254 1 - (debug) - all processes dump core when possible. The core dump is
255 owned by the current user and no security is applied. This is
256 intended for system debugging situations only. Ptrace is unchecked.
257+ This is insecure as it allows regular users to examine the memory
258+ contents of privileged processes.
259 2 - (suidsafe) - any binary which normally would not be dumped is dumped
260- readable by root only. This allows the end user to remove
261- such a dump but not access it directly. For security reasons
262- core dumps in this mode will not overwrite one another or
263- other files. This mode is appropriate when administrators are
264- attempting to debug problems in a normal environment.
265+ anyway, but only if the "core_pattern" kernel sysctl is set to
266+ either a pipe handler or a fully qualified path. (For more details
267+ on this limitation, see CVE-2006-2451.) This mode is appropriate
268+ when administrators are attempting to debug problems in a normal
269+ environment, and either have a core dump pipe handler that knows
270+ to treat privileged core dumps with care, or specific directory
271+ defined for catching core dumps. If a core dump happens without
272+ a pipe handler or fully qualifid path, a message will be emitted
273+ to syslog warning about the lack of a correct setting.
274
275 ==============================================================
276
277diff --git a/Makefile b/Makefile
278index 6453ead..f5148e2 100644
279--- a/Makefile
280+++ b/Makefile
281@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
282
283 HOSTCC = gcc
284 HOSTCXX = g++
285-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
286-HOSTCXXFLAGS = -O2
287+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
288+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
289+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
290
291 # Decide whether to build built-in, modular, or both.
292 # Normally, just do built-in.
293@@ -404,8 +405,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
294 # Rules shared between *config targets and build targets
295
296 # Basic helpers built in scripts/
297-PHONY += scripts_basic
298-scripts_basic:
299+PHONY += scripts_basic gcc-plugins
300+scripts_basic: gcc-plugins
301 $(Q)$(MAKE) $(build)=scripts/basic
302 $(Q)rm -f .tmp_quiet_recordmcount
303
304@@ -561,6 +562,60 @@ else
305 KBUILD_CFLAGS += -O2
306 endif
307
308+ifndef DISABLE_PAX_PLUGINS
309+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
310+ifneq ($(PLUGINCC),)
311+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
312+ifndef CONFIG_UML
313+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
314+endif
315+endif
316+ifdef CONFIG_PAX_MEMORY_STACKLEAK
317+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
318+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
319+endif
320+ifdef CONFIG_KALLOCSTAT_PLUGIN
321+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
322+endif
323+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
324+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
325+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
326+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
327+endif
328+ifdef CONFIG_CHECKER_PLUGIN
329+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
330+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
331+endif
332+endif
333+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
334+ifdef CONFIG_PAX_SIZE_OVERFLOW
335+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
336+endif
337+ifdef CONFIG_PAX_LATENT_ENTROPY
338+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
339+endif
340+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
341+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
342+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
343+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
344+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
345+ifeq ($(KBUILD_EXTMOD),)
346+gcc-plugins:
347+ $(Q)$(MAKE) $(build)=tools/gcc
348+else
349+gcc-plugins: ;
350+endif
351+else
352+gcc-plugins:
353+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
354+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
355+else
356+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
357+endif
358+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
359+endif
360+endif
361+
362 include $(srctree)/arch/$(SRCARCH)/Makefile
363
364 ifdef CONFIG_READABLE_ASM
365@@ -715,7 +770,7 @@ export mod_strip_cmd
366
367
368 ifeq ($(KBUILD_EXTMOD),)
369-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
370+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
371
372 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
373 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
374@@ -762,6 +817,8 @@ endif
375
376 # The actual objects are generated when descending,
377 # make sure no implicit rule kicks in
378+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
379+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
380 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
381
382 # Handle descending into subdirectories listed in $(vmlinux-dirs)
383@@ -771,7 +828,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
384 # Error messages still appears in the original language
385
386 PHONY += $(vmlinux-dirs)
387-$(vmlinux-dirs): prepare scripts
388+$(vmlinux-dirs): gcc-plugins prepare scripts
389 $(Q)$(MAKE) $(build)=$@
390
391 # Store (new) KERNELRELASE string in include/config/kernel.release
392@@ -815,6 +872,7 @@ prepare0: archprepare FORCE
393 $(Q)$(MAKE) $(build)=.
394
395 # All the preparing..
396+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
397 prepare: prepare0
398
399 # Generate some files
400@@ -922,6 +980,8 @@ all: modules
401 # using awk while concatenating to the final file.
402
403 PHONY += modules
404+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
405+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
406 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
407 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
408 @$(kecho) ' Building modules, stage 2.';
409@@ -937,7 +997,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
410
411 # Target to prepare building external modules
412 PHONY += modules_prepare
413-modules_prepare: prepare scripts
414+modules_prepare: gcc-plugins prepare scripts
415
416 # Target to install modules
417 PHONY += modules_install
418@@ -994,7 +1054,7 @@ CLEAN_DIRS += $(MODVERDIR)
419 MRPROPER_DIRS += include/config usr/include include/generated \
420 arch/*/include/generated
421 MRPROPER_FILES += .config .config.old .version .old_version \
422- include/linux/version.h \
423+ include/linux/version.h tools/gcc/size_overflow_hash.h\
424 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
425
426 # clean - Delete most, but leave enough to build external modules
427@@ -1032,6 +1092,7 @@ distclean: mrproper
428 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
429 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
430 -o -name '.*.rej' \
431+ -o -name '.*.rej' -o -name '*.so' \
432 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
433 -type f -print | xargs rm -f
434
435@@ -1192,6 +1253,8 @@ PHONY += $(module-dirs) modules
436 $(module-dirs): crmodverdir $(objtree)/Module.symvers
437 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
438
439+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
440+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
441 modules: $(module-dirs)
442 @$(kecho) ' Building modules, stage 2.';
443 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
444@@ -1326,17 +1389,21 @@ else
445 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
446 endif
447
448-%.s: %.c prepare scripts FORCE
449+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
450+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
451+%.s: %.c gcc-plugins prepare scripts FORCE
452 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
453 %.i: %.c prepare scripts FORCE
454 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
455-%.o: %.c prepare scripts FORCE
456+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
457+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
458+%.o: %.c gcc-plugins prepare scripts FORCE
459 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
460 %.lst: %.c prepare scripts FORCE
461 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
462-%.s: %.S prepare scripts FORCE
463+%.s: %.S gcc-plugins prepare scripts FORCE
464 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
465-%.o: %.S prepare scripts FORCE
466+%.o: %.S gcc-plugins prepare scripts FORCE
467 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
468 %.symtypes: %.c prepare scripts FORCE
469 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
470@@ -1346,11 +1413,15 @@ endif
471 $(cmd_crmodverdir)
472 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
473 $(build)=$(build-dir)
474-%/: prepare scripts FORCE
475+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
476+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
477+%/: gcc-plugins prepare scripts FORCE
478 $(cmd_crmodverdir)
479 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
480 $(build)=$(build-dir)
481-%.ko: prepare scripts FORCE
482+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
483+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
484+%.ko: gcc-plugins prepare scripts FORCE
485 $(cmd_crmodverdir)
486 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
487 $(build)=$(build-dir) $(@:.ko=.o)
488diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
489index 3bb7ffe..347a54c 100644
490--- a/arch/alpha/include/asm/atomic.h
491+++ b/arch/alpha/include/asm/atomic.h
492@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
493 #define atomic_dec(v) atomic_sub(1,(v))
494 #define atomic64_dec(v) atomic64_sub(1,(v))
495
496+#define atomic64_read_unchecked(v) atomic64_read(v)
497+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
498+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
499+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
500+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
501+#define atomic64_inc_unchecked(v) atomic64_inc(v)
502+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
503+#define atomic64_dec_unchecked(v) atomic64_dec(v)
504+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
505+
506 #define smp_mb__before_atomic_dec() smp_mb()
507 #define smp_mb__after_atomic_dec() smp_mb()
508 #define smp_mb__before_atomic_inc() smp_mb()
509diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
510index ad368a9..fbe0f25 100644
511--- a/arch/alpha/include/asm/cache.h
512+++ b/arch/alpha/include/asm/cache.h
513@@ -4,19 +4,19 @@
514 #ifndef __ARCH_ALPHA_CACHE_H
515 #define __ARCH_ALPHA_CACHE_H
516
517+#include <linux/const.h>
518
519 /* Bytes per L1 (data) cache line. */
520 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
521-# define L1_CACHE_BYTES 64
522 # define L1_CACHE_SHIFT 6
523 #else
524 /* Both EV4 and EV5 are write-through, read-allocate,
525 direct-mapped, physical.
526 */
527-# define L1_CACHE_BYTES 32
528 # define L1_CACHE_SHIFT 5
529 #endif
530
531+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
532 #define SMP_CACHE_BYTES L1_CACHE_BYTES
533
534 #endif
535diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
536index 968d999..d36b2df 100644
537--- a/arch/alpha/include/asm/elf.h
538+++ b/arch/alpha/include/asm/elf.h
539@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
540
541 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
542
543+#ifdef CONFIG_PAX_ASLR
544+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
545+
546+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
547+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
548+#endif
549+
550 /* $0 is set by ld.so to a pointer to a function which might be
551 registered using atexit. This provides a mean for the dynamic
552 linker to call DT_FINI functions for shared libraries that have
553diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
554index bc2a0da..8ad11ee 100644
555--- a/arch/alpha/include/asm/pgalloc.h
556+++ b/arch/alpha/include/asm/pgalloc.h
557@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
558 pgd_set(pgd, pmd);
559 }
560
561+static inline void
562+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
563+{
564+ pgd_populate(mm, pgd, pmd);
565+}
566+
567 extern pgd_t *pgd_alloc(struct mm_struct *mm);
568
569 static inline void
570diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
571index 81a4342..348b927 100644
572--- a/arch/alpha/include/asm/pgtable.h
573+++ b/arch/alpha/include/asm/pgtable.h
574@@ -102,6 +102,17 @@ struct vm_area_struct;
575 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
576 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
577 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
578+
579+#ifdef CONFIG_PAX_PAGEEXEC
580+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
581+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
582+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
583+#else
584+# define PAGE_SHARED_NOEXEC PAGE_SHARED
585+# define PAGE_COPY_NOEXEC PAGE_COPY
586+# define PAGE_READONLY_NOEXEC PAGE_READONLY
587+#endif
588+
589 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
590
591 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
592diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
593index 2fd00b7..cfd5069 100644
594--- a/arch/alpha/kernel/module.c
595+++ b/arch/alpha/kernel/module.c
596@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
597
598 /* The small sections were sorted to the end of the segment.
599 The following should definitely cover them. */
600- gp = (u64)me->module_core + me->core_size - 0x8000;
601+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
602 got = sechdrs[me->arch.gotsecindex].sh_addr;
603
604 for (i = 0; i < n; i++) {
605diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
606index 98a1036..fb54ccf 100644
607--- a/arch/alpha/kernel/osf_sys.c
608+++ b/arch/alpha/kernel/osf_sys.c
609@@ -1312,7 +1312,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
610 /* At this point: (!vma || addr < vma->vm_end). */
611 if (limit - len < addr)
612 return -ENOMEM;
613- if (!vma || addr + len <= vma->vm_start)
614+ if (check_heap_stack_gap(vma, addr, len))
615 return addr;
616 addr = vma->vm_end;
617 vma = vma->vm_next;
618@@ -1348,6 +1348,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
619 merely specific addresses, but regions of memory -- perhaps
620 this feature should be incorporated into all ports? */
621
622+#ifdef CONFIG_PAX_RANDMMAP
623+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
624+#endif
625+
626 if (addr) {
627 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
628 if (addr != (unsigned long) -ENOMEM)
629@@ -1355,8 +1359,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
630 }
631
632 /* Next, try allocating at TASK_UNMAPPED_BASE. */
633- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
634- len, limit);
635+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
636+
637 if (addr != (unsigned long) -ENOMEM)
638 return addr;
639
640diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
641index 5eecab1..609abc0 100644
642--- a/arch/alpha/mm/fault.c
643+++ b/arch/alpha/mm/fault.c
644@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
645 __reload_thread(pcb);
646 }
647
648+#ifdef CONFIG_PAX_PAGEEXEC
649+/*
650+ * PaX: decide what to do with offenders (regs->pc = fault address)
651+ *
652+ * returns 1 when task should be killed
653+ * 2 when patched PLT trampoline was detected
654+ * 3 when unpatched PLT trampoline was detected
655+ */
656+static int pax_handle_fetch_fault(struct pt_regs *regs)
657+{
658+
659+#ifdef CONFIG_PAX_EMUPLT
660+ int err;
661+
662+ do { /* PaX: patched PLT emulation #1 */
663+ unsigned int ldah, ldq, jmp;
664+
665+ err = get_user(ldah, (unsigned int *)regs->pc);
666+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
667+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
668+
669+ if (err)
670+ break;
671+
672+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
673+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
674+ jmp == 0x6BFB0000U)
675+ {
676+ unsigned long r27, addr;
677+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
678+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
679+
680+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
681+ err = get_user(r27, (unsigned long *)addr);
682+ if (err)
683+ break;
684+
685+ regs->r27 = r27;
686+ regs->pc = r27;
687+ return 2;
688+ }
689+ } while (0);
690+
691+ do { /* PaX: patched PLT emulation #2 */
692+ unsigned int ldah, lda, br;
693+
694+ err = get_user(ldah, (unsigned int *)regs->pc);
695+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
696+ err |= get_user(br, (unsigned int *)(regs->pc+8));
697+
698+ if (err)
699+ break;
700+
701+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
702+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
703+ (br & 0xFFE00000U) == 0xC3E00000U)
704+ {
705+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
706+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
707+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
708+
709+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
710+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
711+ return 2;
712+ }
713+ } while (0);
714+
715+ do { /* PaX: unpatched PLT emulation */
716+ unsigned int br;
717+
718+ err = get_user(br, (unsigned int *)regs->pc);
719+
720+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
721+ unsigned int br2, ldq, nop, jmp;
722+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
723+
724+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
725+ err = get_user(br2, (unsigned int *)addr);
726+ err |= get_user(ldq, (unsigned int *)(addr+4));
727+ err |= get_user(nop, (unsigned int *)(addr+8));
728+ err |= get_user(jmp, (unsigned int *)(addr+12));
729+ err |= get_user(resolver, (unsigned long *)(addr+16));
730+
731+ if (err)
732+ break;
733+
734+ if (br2 == 0xC3600000U &&
735+ ldq == 0xA77B000CU &&
736+ nop == 0x47FF041FU &&
737+ jmp == 0x6B7B0000U)
738+ {
739+ regs->r28 = regs->pc+4;
740+ regs->r27 = addr+16;
741+ regs->pc = resolver;
742+ return 3;
743+ }
744+ }
745+ } while (0);
746+#endif
747+
748+ return 1;
749+}
750+
751+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
752+{
753+ unsigned long i;
754+
755+ printk(KERN_ERR "PAX: bytes at PC: ");
756+ for (i = 0; i < 5; i++) {
757+ unsigned int c;
758+ if (get_user(c, (unsigned int *)pc+i))
759+ printk(KERN_CONT "???????? ");
760+ else
761+ printk(KERN_CONT "%08x ", c);
762+ }
763+ printk("\n");
764+}
765+#endif
766
767 /*
768 * This routine handles page faults. It determines the address,
769@@ -130,8 +248,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
770 good_area:
771 si_code = SEGV_ACCERR;
772 if (cause < 0) {
773- if (!(vma->vm_flags & VM_EXEC))
774+ if (!(vma->vm_flags & VM_EXEC)) {
775+
776+#ifdef CONFIG_PAX_PAGEEXEC
777+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
778+ goto bad_area;
779+
780+ up_read(&mm->mmap_sem);
781+ switch (pax_handle_fetch_fault(regs)) {
782+
783+#ifdef CONFIG_PAX_EMUPLT
784+ case 2:
785+ case 3:
786+ return;
787+#endif
788+
789+ }
790+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
791+ do_group_exit(SIGKILL);
792+#else
793 goto bad_area;
794+#endif
795+
796+ }
797 } else if (!cause) {
798 /* Allow reads even for write-only mappings */
799 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
800diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
801index c79f61f..9ac0642 100644
802--- a/arch/arm/include/asm/atomic.h
803+++ b/arch/arm/include/asm/atomic.h
804@@ -17,17 +17,35 @@
805 #include <asm/barrier.h>
806 #include <asm/cmpxchg.h>
807
808+#ifdef CONFIG_GENERIC_ATOMIC64
809+#include <asm-generic/atomic64.h>
810+#endif
811+
812 #define ATOMIC_INIT(i) { (i) }
813
814 #ifdef __KERNEL__
815
816+#define _ASM_EXTABLE(from, to) \
817+" .pushsection __ex_table,\"a\"\n"\
818+" .align 3\n" \
819+" .long " #from ", " #to"\n" \
820+" .popsection"
821+
822 /*
823 * On ARM, ordinary assignment (str instruction) doesn't clear the local
824 * strex/ldrex monitor on some implementations. The reason we can use it for
825 * atomic_set() is the clrex or dummy strex done on every exception return.
826 */
827 #define atomic_read(v) (*(volatile int *)&(v)->counter)
828+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
829+{
830+ return v->counter;
831+}
832 #define atomic_set(v,i) (((v)->counter) = (i))
833+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
834+{
835+ v->counter = i;
836+}
837
838 #if __LINUX_ARM_ARCH__ >= 6
839
840@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
841 int result;
842
843 __asm__ __volatile__("@ atomic_add\n"
844+"1: ldrex %1, [%3]\n"
845+" adds %0, %1, %4\n"
846+
847+#ifdef CONFIG_PAX_REFCOUNT
848+" bvc 3f\n"
849+"2: bkpt 0xf103\n"
850+"3:\n"
851+#endif
852+
853+" strex %1, %0, [%3]\n"
854+" teq %1, #0\n"
855+" bne 1b"
856+
857+#ifdef CONFIG_PAX_REFCOUNT
858+"\n4:\n"
859+ _ASM_EXTABLE(2b, 4b)
860+#endif
861+
862+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
863+ : "r" (&v->counter), "Ir" (i)
864+ : "cc");
865+}
866+
867+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
868+{
869+ unsigned long tmp;
870+ int result;
871+
872+ __asm__ __volatile__("@ atomic_add_unchecked\n"
873 "1: ldrex %0, [%3]\n"
874 " add %0, %0, %4\n"
875 " strex %1, %0, [%3]\n"
876@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
877 smp_mb();
878
879 __asm__ __volatile__("@ atomic_add_return\n"
880+"1: ldrex %1, [%3]\n"
881+" adds %0, %1, %4\n"
882+
883+#ifdef CONFIG_PAX_REFCOUNT
884+" bvc 3f\n"
885+" mov %0, %1\n"
886+"2: bkpt 0xf103\n"
887+"3:\n"
888+#endif
889+
890+" strex %1, %0, [%3]\n"
891+" teq %1, #0\n"
892+" bne 1b"
893+
894+#ifdef CONFIG_PAX_REFCOUNT
895+"\n4:\n"
896+ _ASM_EXTABLE(2b, 4b)
897+#endif
898+
899+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
900+ : "r" (&v->counter), "Ir" (i)
901+ : "cc");
902+
903+ smp_mb();
904+
905+ return result;
906+}
907+
908+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
909+{
910+ unsigned long tmp;
911+ int result;
912+
913+ smp_mb();
914+
915+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
916 "1: ldrex %0, [%3]\n"
917 " add %0, %0, %4\n"
918 " strex %1, %0, [%3]\n"
919@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
920 int result;
921
922 __asm__ __volatile__("@ atomic_sub\n"
923+"1: ldrex %1, [%3]\n"
924+" subs %0, %1, %4\n"
925+
926+#ifdef CONFIG_PAX_REFCOUNT
927+" bvc 3f\n"
928+"2: bkpt 0xf103\n"
929+"3:\n"
930+#endif
931+
932+" strex %1, %0, [%3]\n"
933+" teq %1, #0\n"
934+" bne 1b"
935+
936+#ifdef CONFIG_PAX_REFCOUNT
937+"\n4:\n"
938+ _ASM_EXTABLE(2b, 4b)
939+#endif
940+
941+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
942+ : "r" (&v->counter), "Ir" (i)
943+ : "cc");
944+}
945+
946+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
947+{
948+ unsigned long tmp;
949+ int result;
950+
951+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
952 "1: ldrex %0, [%3]\n"
953 " sub %0, %0, %4\n"
954 " strex %1, %0, [%3]\n"
955@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
956 smp_mb();
957
958 __asm__ __volatile__("@ atomic_sub_return\n"
959-"1: ldrex %0, [%3]\n"
960-" sub %0, %0, %4\n"
961+"1: ldrex %1, [%3]\n"
962+" subs %0, %1, %4\n"
963+
964+#ifdef CONFIG_PAX_REFCOUNT
965+" bvc 3f\n"
966+" mov %0, %1\n"
967+"2: bkpt 0xf103\n"
968+"3:\n"
969+#endif
970+
971 " strex %1, %0, [%3]\n"
972 " teq %1, #0\n"
973 " bne 1b"
974+
975+#ifdef CONFIG_PAX_REFCOUNT
976+"\n4:\n"
977+ _ASM_EXTABLE(2b, 4b)
978+#endif
979+
980 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
981 : "r" (&v->counter), "Ir" (i)
982 : "cc");
983@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
984 return oldval;
985 }
986
987+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
988+{
989+ unsigned long oldval, res;
990+
991+ smp_mb();
992+
993+ do {
994+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
995+ "ldrex %1, [%3]\n"
996+ "mov %0, #0\n"
997+ "teq %1, %4\n"
998+ "strexeq %0, %5, [%3]\n"
999+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1000+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1001+ : "cc");
1002+ } while (res);
1003+
1004+ smp_mb();
1005+
1006+ return oldval;
1007+}
1008+
1009 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1010 {
1011 unsigned long tmp, tmp2;
1012@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1013
1014 return val;
1015 }
1016+
1017+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1018+{
1019+ return atomic_add_return(i, v);
1020+}
1021+
1022 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1023+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1024+{
1025+ (void) atomic_add_return(i, v);
1026+}
1027
1028 static inline int atomic_sub_return(int i, atomic_t *v)
1029 {
1030@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1031 return val;
1032 }
1033 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1034+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1035+{
1036+ (void) atomic_sub_return(i, v);
1037+}
1038
1039 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1040 {
1041@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1042 return ret;
1043 }
1044
1045+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1046+{
1047+ return atomic_cmpxchg(v, old, new);
1048+}
1049+
1050 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1051 {
1052 unsigned long flags;
1053@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1054 #endif /* __LINUX_ARM_ARCH__ */
1055
1056 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1057+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1058+{
1059+ return xchg(&v->counter, new);
1060+}
1061
1062 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1063 {
1064@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1065 }
1066
1067 #define atomic_inc(v) atomic_add(1, v)
1068+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1069+{
1070+ atomic_add_unchecked(1, v);
1071+}
1072 #define atomic_dec(v) atomic_sub(1, v)
1073+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1074+{
1075+ atomic_sub_unchecked(1, v);
1076+}
1077
1078 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1079+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1080+{
1081+ return atomic_add_return_unchecked(1, v) == 0;
1082+}
1083 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1084 #define atomic_inc_return(v) (atomic_add_return(1, v))
1085+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1086+{
1087+ return atomic_add_return_unchecked(1, v);
1088+}
1089 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1090 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1091
1092@@ -241,6 +428,14 @@ typedef struct {
1093 u64 __aligned(8) counter;
1094 } atomic64_t;
1095
1096+#ifdef CONFIG_PAX_REFCOUNT
1097+typedef struct {
1098+ u64 __aligned(8) counter;
1099+} atomic64_unchecked_t;
1100+#else
1101+typedef atomic64_t atomic64_unchecked_t;
1102+#endif
1103+
1104 #define ATOMIC64_INIT(i) { (i) }
1105
1106 static inline u64 atomic64_read(const atomic64_t *v)
1107@@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1108 return result;
1109 }
1110
1111+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1112+{
1113+ u64 result;
1114+
1115+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1116+" ldrexd %0, %H0, [%1]"
1117+ : "=&r" (result)
1118+ : "r" (&v->counter), "Qo" (v->counter)
1119+ );
1120+
1121+ return result;
1122+}
1123+
1124 static inline void atomic64_set(atomic64_t *v, u64 i)
1125 {
1126 u64 tmp;
1127@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1128 : "cc");
1129 }
1130
1131+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1132+{
1133+ u64 tmp;
1134+
1135+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1136+"1: ldrexd %0, %H0, [%2]\n"
1137+" strexd %0, %3, %H3, [%2]\n"
1138+" teq %0, #0\n"
1139+" bne 1b"
1140+ : "=&r" (tmp), "=Qo" (v->counter)
1141+ : "r" (&v->counter), "r" (i)
1142+ : "cc");
1143+}
1144+
1145 static inline void atomic64_add(u64 i, atomic64_t *v)
1146 {
1147 u64 result;
1148@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1149 __asm__ __volatile__("@ atomic64_add\n"
1150 "1: ldrexd %0, %H0, [%3]\n"
1151 " adds %0, %0, %4\n"
1152+" adcs %H0, %H0, %H4\n"
1153+
1154+#ifdef CONFIG_PAX_REFCOUNT
1155+" bvc 3f\n"
1156+"2: bkpt 0xf103\n"
1157+"3:\n"
1158+#endif
1159+
1160+" strexd %1, %0, %H0, [%3]\n"
1161+" teq %1, #0\n"
1162+" bne 1b"
1163+
1164+#ifdef CONFIG_PAX_REFCOUNT
1165+"\n4:\n"
1166+ _ASM_EXTABLE(2b, 4b)
1167+#endif
1168+
1169+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1170+ : "r" (&v->counter), "r" (i)
1171+ : "cc");
1172+}
1173+
1174+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1175+{
1176+ u64 result;
1177+ unsigned long tmp;
1178+
1179+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1180+"1: ldrexd %0, %H0, [%3]\n"
1181+" adds %0, %0, %4\n"
1182 " adc %H0, %H0, %H4\n"
1183 " strexd %1, %0, %H0, [%3]\n"
1184 " teq %1, #0\n"
1185@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1186
1187 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1188 {
1189- u64 result;
1190- unsigned long tmp;
1191+ u64 result, tmp;
1192
1193 smp_mb();
1194
1195 __asm__ __volatile__("@ atomic64_add_return\n"
1196+"1: ldrexd %1, %H1, [%3]\n"
1197+" adds %0, %1, %4\n"
1198+" adcs %H0, %H1, %H4\n"
1199+
1200+#ifdef CONFIG_PAX_REFCOUNT
1201+" bvc 3f\n"
1202+" mov %0, %1\n"
1203+" mov %H0, %H1\n"
1204+"2: bkpt 0xf103\n"
1205+"3:\n"
1206+#endif
1207+
1208+" strexd %1, %0, %H0, [%3]\n"
1209+" teq %1, #0\n"
1210+" bne 1b"
1211+
1212+#ifdef CONFIG_PAX_REFCOUNT
1213+"\n4:\n"
1214+ _ASM_EXTABLE(2b, 4b)
1215+#endif
1216+
1217+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1218+ : "r" (&v->counter), "r" (i)
1219+ : "cc");
1220+
1221+ smp_mb();
1222+
1223+ return result;
1224+}
1225+
1226+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1227+{
1228+ u64 result;
1229+ unsigned long tmp;
1230+
1231+ smp_mb();
1232+
1233+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1234 "1: ldrexd %0, %H0, [%3]\n"
1235 " adds %0, %0, %4\n"
1236 " adc %H0, %H0, %H4\n"
1237@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1238 __asm__ __volatile__("@ atomic64_sub\n"
1239 "1: ldrexd %0, %H0, [%3]\n"
1240 " subs %0, %0, %4\n"
1241+" sbcs %H0, %H0, %H4\n"
1242+
1243+#ifdef CONFIG_PAX_REFCOUNT
1244+" bvc 3f\n"
1245+"2: bkpt 0xf103\n"
1246+"3:\n"
1247+#endif
1248+
1249+" strexd %1, %0, %H0, [%3]\n"
1250+" teq %1, #0\n"
1251+" bne 1b"
1252+
1253+#ifdef CONFIG_PAX_REFCOUNT
1254+"\n4:\n"
1255+ _ASM_EXTABLE(2b, 4b)
1256+#endif
1257+
1258+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1259+ : "r" (&v->counter), "r" (i)
1260+ : "cc");
1261+}
1262+
1263+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1264+{
1265+ u64 result;
1266+ unsigned long tmp;
1267+
1268+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1269+"1: ldrexd %0, %H0, [%3]\n"
1270+" subs %0, %0, %4\n"
1271 " sbc %H0, %H0, %H4\n"
1272 " strexd %1, %0, %H0, [%3]\n"
1273 " teq %1, #0\n"
1274@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1275
1276 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1277 {
1278- u64 result;
1279- unsigned long tmp;
1280+ u64 result, tmp;
1281
1282 smp_mb();
1283
1284 __asm__ __volatile__("@ atomic64_sub_return\n"
1285-"1: ldrexd %0, %H0, [%3]\n"
1286-" subs %0, %0, %4\n"
1287-" sbc %H0, %H0, %H4\n"
1288+"1: ldrexd %1, %H1, [%3]\n"
1289+" subs %0, %1, %4\n"
1290+" sbcs %H0, %H1, %H4\n"
1291+
1292+#ifdef CONFIG_PAX_REFCOUNT
1293+" bvc 3f\n"
1294+" mov %0, %1\n"
1295+" mov %H0, %H1\n"
1296+"2: bkpt 0xf103\n"
1297+"3:\n"
1298+#endif
1299+
1300 " strexd %1, %0, %H0, [%3]\n"
1301 " teq %1, #0\n"
1302 " bne 1b"
1303+
1304+#ifdef CONFIG_PAX_REFCOUNT
1305+"\n4:\n"
1306+ _ASM_EXTABLE(2b, 4b)
1307+#endif
1308+
1309 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1310 : "r" (&v->counter), "r" (i)
1311 : "cc");
1312@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1313 return oldval;
1314 }
1315
1316+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1317+{
1318+ u64 oldval;
1319+ unsigned long res;
1320+
1321+ smp_mb();
1322+
1323+ do {
1324+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1325+ "ldrexd %1, %H1, [%3]\n"
1326+ "mov %0, #0\n"
1327+ "teq %1, %4\n"
1328+ "teqeq %H1, %H4\n"
1329+ "strexdeq %0, %5, %H5, [%3]"
1330+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1331+ : "r" (&ptr->counter), "r" (old), "r" (new)
1332+ : "cc");
1333+ } while (res);
1334+
1335+ smp_mb();
1336+
1337+ return oldval;
1338+}
1339+
1340 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1341 {
1342 u64 result;
1343@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1344
1345 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1346 {
1347- u64 result;
1348- unsigned long tmp;
1349+ u64 result, tmp;
1350
1351 smp_mb();
1352
1353 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1354-"1: ldrexd %0, %H0, [%3]\n"
1355-" subs %0, %0, #1\n"
1356-" sbc %H0, %H0, #0\n"
1357+"1: ldrexd %1, %H1, [%3]\n"
1358+" subs %0, %1, #1\n"
1359+" sbcs %H0, %H1, #0\n"
1360+
1361+#ifdef CONFIG_PAX_REFCOUNT
1362+" bvc 3f\n"
1363+" mov %0, %1\n"
1364+" mov %H0, %H1\n"
1365+"2: bkpt 0xf103\n"
1366+"3:\n"
1367+#endif
1368+
1369 " teq %H0, #0\n"
1370-" bmi 2f\n"
1371+" bmi 4f\n"
1372 " strexd %1, %0, %H0, [%3]\n"
1373 " teq %1, #0\n"
1374 " bne 1b\n"
1375-"2:"
1376+"4:\n"
1377+
1378+#ifdef CONFIG_PAX_REFCOUNT
1379+ _ASM_EXTABLE(2b, 4b)
1380+#endif
1381+
1382 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1383 : "r" (&v->counter)
1384 : "cc");
1385@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1386 " teq %0, %5\n"
1387 " teqeq %H0, %H5\n"
1388 " moveq %1, #0\n"
1389-" beq 2f\n"
1390+" beq 4f\n"
1391 " adds %0, %0, %6\n"
1392-" adc %H0, %H0, %H6\n"
1393+" adcs %H0, %H0, %H6\n"
1394+
1395+#ifdef CONFIG_PAX_REFCOUNT
1396+" bvc 3f\n"
1397+"2: bkpt 0xf103\n"
1398+"3:\n"
1399+#endif
1400+
1401 " strexd %2, %0, %H0, [%4]\n"
1402 " teq %2, #0\n"
1403 " bne 1b\n"
1404-"2:"
1405+"4:\n"
1406+
1407+#ifdef CONFIG_PAX_REFCOUNT
1408+ _ASM_EXTABLE(2b, 4b)
1409+#endif
1410+
1411 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1412 : "r" (&v->counter), "r" (u), "r" (a)
1413 : "cc");
1414@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1415
1416 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1417 #define atomic64_inc(v) atomic64_add(1LL, (v))
1418+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1419 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1420+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1421 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1422 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1423 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1424+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1425 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1426 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1427 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1428diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1429index 75fe66b..2255c86 100644
1430--- a/arch/arm/include/asm/cache.h
1431+++ b/arch/arm/include/asm/cache.h
1432@@ -4,8 +4,10 @@
1433 #ifndef __ASMARM_CACHE_H
1434 #define __ASMARM_CACHE_H
1435
1436+#include <linux/const.h>
1437+
1438 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1439-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1440+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1441
1442 /*
1443 * Memory returned by kmalloc() may be used for DMA, so we must make
1444diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1445index e4448e1..7bc86b7 100644
1446--- a/arch/arm/include/asm/cacheflush.h
1447+++ b/arch/arm/include/asm/cacheflush.h
1448@@ -108,7 +108,7 @@ struct cpu_cache_fns {
1449 void (*dma_unmap_area)(const void *, size_t, int);
1450
1451 void (*dma_flush_range)(const void *, const void *);
1452-};
1453+} __no_const;
1454
1455 /*
1456 * Select the calling method
1457diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1458index 7eb18c1..e38b6d2 100644
1459--- a/arch/arm/include/asm/cmpxchg.h
1460+++ b/arch/arm/include/asm/cmpxchg.h
1461@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1462
1463 #define xchg(ptr,x) \
1464 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1465+#define xchg_unchecked(ptr,x) \
1466+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1467
1468 #include <asm-generic/cmpxchg-local.h>
1469
1470diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1471index 38050b1..9d90e8b 100644
1472--- a/arch/arm/include/asm/elf.h
1473+++ b/arch/arm/include/asm/elf.h
1474@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1475 the loader. We need to make sure that it is out of the way of the program
1476 that it will "exec", and that there is sufficient room for the brk. */
1477
1478-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1479+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1480+
1481+#ifdef CONFIG_PAX_ASLR
1482+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1483+
1484+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1485+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1486+#endif
1487
1488 /* When the program starts, a1 contains a pointer to a function to be
1489 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1490@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1491 extern void elf_set_personality(const struct elf32_hdr *);
1492 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1493
1494-struct mm_struct;
1495-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1496-#define arch_randomize_brk arch_randomize_brk
1497-
1498 #endif
1499diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1500index e51b1e8..32a3113 100644
1501--- a/arch/arm/include/asm/kmap_types.h
1502+++ b/arch/arm/include/asm/kmap_types.h
1503@@ -21,6 +21,7 @@ enum km_type {
1504 KM_L1_CACHE,
1505 KM_L2_CACHE,
1506 KM_KDB,
1507+ KM_CLEARPAGE,
1508 KM_TYPE_NR
1509 };
1510
1511diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1512index 53426c6..c7baff3 100644
1513--- a/arch/arm/include/asm/outercache.h
1514+++ b/arch/arm/include/asm/outercache.h
1515@@ -35,7 +35,7 @@ struct outer_cache_fns {
1516 #endif
1517 void (*set_debug)(unsigned long);
1518 void (*resume)(void);
1519-};
1520+} __no_const;
1521
1522 #ifdef CONFIG_OUTER_CACHE
1523
1524diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1525index ecf9019..b71d9a1 100644
1526--- a/arch/arm/include/asm/page.h
1527+++ b/arch/arm/include/asm/page.h
1528@@ -114,7 +114,7 @@ struct cpu_user_fns {
1529 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1530 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1531 unsigned long vaddr, struct vm_area_struct *vma);
1532-};
1533+} __no_const;
1534
1535 #ifdef MULTI_USER
1536 extern struct cpu_user_fns cpu_user;
1537diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1538index 943504f..bf8d667 100644
1539--- a/arch/arm/include/asm/pgalloc.h
1540+++ b/arch/arm/include/asm/pgalloc.h
1541@@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1542 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1543 }
1544
1545+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1546+{
1547+ pud_populate(mm, pud, pmd);
1548+}
1549+
1550 #else /* !CONFIG_ARM_LPAE */
1551
1552 /*
1553@@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1554 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1555 #define pmd_free(mm, pmd) do { } while (0)
1556 #define pud_populate(mm,pmd,pte) BUG()
1557+#define pud_populate_kernel(mm,pmd,pte) BUG()
1558
1559 #endif /* CONFIG_ARM_LPAE */
1560
1561diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1562index af7b0bd..6750a8c 100644
1563--- a/arch/arm/include/asm/thread_info.h
1564+++ b/arch/arm/include/asm/thread_info.h
1565@@ -148,6 +148,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1566 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1567 #define TIF_SYSCALL_TRACE 8
1568 #define TIF_SYSCALL_AUDIT 9
1569+
1570+/* within 8 bits of TIF_SYSCALL_TRACE
1571+ to meet flexible second operand requirements
1572+*/
1573+#define TIF_GRSEC_SETXID 10
1574+
1575 #define TIF_POLLING_NRFLAG 16
1576 #define TIF_USING_IWMMXT 17
1577 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1578@@ -163,9 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1579 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
1580 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1581 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1582+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1583
1584 /* Checks for any syscall work in entry-common.S */
1585-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1586+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1587+ _TIF_GRSEC_SETXID)
1588
1589 /*
1590 * Change these and you break ASM code in entry-common.S
1591diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1592index 71f6536..602f279 100644
1593--- a/arch/arm/include/asm/uaccess.h
1594+++ b/arch/arm/include/asm/uaccess.h
1595@@ -22,6 +22,8 @@
1596 #define VERIFY_READ 0
1597 #define VERIFY_WRITE 1
1598
1599+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1600+
1601 /*
1602 * The exception table consists of pairs of addresses: the first is the
1603 * address of an instruction that is allowed to fault, and the second is
1604@@ -387,8 +389,23 @@ do { \
1605
1606
1607 #ifdef CONFIG_MMU
1608-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1609-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1610+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1611+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1612+
1613+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1614+{
1615+ if (!__builtin_constant_p(n))
1616+ check_object_size(to, n, false);
1617+ return ___copy_from_user(to, from, n);
1618+}
1619+
1620+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1621+{
1622+ if (!__builtin_constant_p(n))
1623+ check_object_size(from, n, true);
1624+ return ___copy_to_user(to, from, n);
1625+}
1626+
1627 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1628 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1629 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1630@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1631
1632 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1633 {
1634+ if ((long)n < 0)
1635+ return n;
1636+
1637 if (access_ok(VERIFY_READ, from, n))
1638 n = __copy_from_user(to, from, n);
1639 else /* security hole - plug it */
1640@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1641
1642 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1643 {
1644+ if ((long)n < 0)
1645+ return n;
1646+
1647 if (access_ok(VERIFY_WRITE, to, n))
1648 n = __copy_to_user(to, from, n);
1649 return n;
1650diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1651index b57c75e..ed2d6b2 100644
1652--- a/arch/arm/kernel/armksyms.c
1653+++ b/arch/arm/kernel/armksyms.c
1654@@ -94,8 +94,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1655 #ifdef CONFIG_MMU
1656 EXPORT_SYMBOL(copy_page);
1657
1658-EXPORT_SYMBOL(__copy_from_user);
1659-EXPORT_SYMBOL(__copy_to_user);
1660+EXPORT_SYMBOL(___copy_from_user);
1661+EXPORT_SYMBOL(___copy_to_user);
1662 EXPORT_SYMBOL(__clear_user);
1663
1664 EXPORT_SYMBOL(__get_user_1);
1665diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1666index 693b744..e684262 100644
1667--- a/arch/arm/kernel/process.c
1668+++ b/arch/arm/kernel/process.c
1669@@ -28,7 +28,6 @@
1670 #include <linux/tick.h>
1671 #include <linux/utsname.h>
1672 #include <linux/uaccess.h>
1673-#include <linux/random.h>
1674 #include <linux/hw_breakpoint.h>
1675 #include <linux/cpuidle.h>
1676
1677@@ -256,9 +255,10 @@ void machine_power_off(void)
1678 machine_shutdown();
1679 if (pm_power_off)
1680 pm_power_off();
1681+ BUG();
1682 }
1683
1684-void machine_restart(char *cmd)
1685+__noreturn void machine_restart(char *cmd)
1686 {
1687 machine_shutdown();
1688
1689@@ -501,12 +501,6 @@ unsigned long get_wchan(struct task_struct *p)
1690 return 0;
1691 }
1692
1693-unsigned long arch_randomize_brk(struct mm_struct *mm)
1694-{
1695- unsigned long range_end = mm->brk + 0x02000000;
1696- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1697-}
1698-
1699 #ifdef CONFIG_MMU
1700 /*
1701 * The vectors page is always readable from user space for the
1702diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1703index 14e3826..d832d89 100644
1704--- a/arch/arm/kernel/ptrace.c
1705+++ b/arch/arm/kernel/ptrace.c
1706@@ -907,10 +907,19 @@ long arch_ptrace(struct task_struct *child, long request,
1707 return ret;
1708 }
1709
1710+#ifdef CONFIG_GRKERNSEC_SETXID
1711+extern void gr_delayed_cred_worker(void);
1712+#endif
1713+
1714 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1715 {
1716 unsigned long ip;
1717
1718+#ifdef CONFIG_GRKERNSEC_SETXID
1719+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1720+ gr_delayed_cred_worker();
1721+#endif
1722+
1723 if (why)
1724 audit_syscall_exit(regs);
1725 else
1726diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1727index e15d83b..8c466dd 100644
1728--- a/arch/arm/kernel/setup.c
1729+++ b/arch/arm/kernel/setup.c
1730@@ -112,13 +112,13 @@ struct processor processor __read_mostly;
1731 struct cpu_tlb_fns cpu_tlb __read_mostly;
1732 #endif
1733 #ifdef MULTI_USER
1734-struct cpu_user_fns cpu_user __read_mostly;
1735+struct cpu_user_fns cpu_user __read_only;
1736 #endif
1737 #ifdef MULTI_CACHE
1738-struct cpu_cache_fns cpu_cache __read_mostly;
1739+struct cpu_cache_fns cpu_cache __read_only;
1740 #endif
1741 #ifdef CONFIG_OUTER_CACHE
1742-struct outer_cache_fns outer_cache __read_mostly;
1743+struct outer_cache_fns outer_cache __read_only;
1744 EXPORT_SYMBOL(outer_cache);
1745 #endif
1746
1747diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1748index c7cae6b..e1e523c 100644
1749--- a/arch/arm/kernel/traps.c
1750+++ b/arch/arm/kernel/traps.c
1751@@ -264,6 +264,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1752
1753 static DEFINE_RAW_SPINLOCK(die_lock);
1754
1755+extern void gr_handle_kernel_exploit(void);
1756+
1757 /*
1758 * This function is protected against re-entrancy.
1759 */
1760@@ -296,6 +298,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1761 panic("Fatal exception in interrupt");
1762 if (panic_on_oops)
1763 panic("Fatal exception");
1764+
1765+ gr_handle_kernel_exploit();
1766+
1767 if (ret != NOTIFY_STOP)
1768 do_exit(SIGSEGV);
1769 }
1770diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1771index 66a477a..bee61d3 100644
1772--- a/arch/arm/lib/copy_from_user.S
1773+++ b/arch/arm/lib/copy_from_user.S
1774@@ -16,7 +16,7 @@
1775 /*
1776 * Prototype:
1777 *
1778- * size_t __copy_from_user(void *to, const void *from, size_t n)
1779+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
1780 *
1781 * Purpose:
1782 *
1783@@ -84,11 +84,11 @@
1784
1785 .text
1786
1787-ENTRY(__copy_from_user)
1788+ENTRY(___copy_from_user)
1789
1790 #include "copy_template.S"
1791
1792-ENDPROC(__copy_from_user)
1793+ENDPROC(___copy_from_user)
1794
1795 .pushsection .fixup,"ax"
1796 .align 0
1797diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1798index 6ee2f67..d1cce76 100644
1799--- a/arch/arm/lib/copy_page.S
1800+++ b/arch/arm/lib/copy_page.S
1801@@ -10,6 +10,7 @@
1802 * ASM optimised string functions
1803 */
1804 #include <linux/linkage.h>
1805+#include <linux/const.h>
1806 #include <asm/assembler.h>
1807 #include <asm/asm-offsets.h>
1808 #include <asm/cache.h>
1809diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1810index d066df6..df28194 100644
1811--- a/arch/arm/lib/copy_to_user.S
1812+++ b/arch/arm/lib/copy_to_user.S
1813@@ -16,7 +16,7 @@
1814 /*
1815 * Prototype:
1816 *
1817- * size_t __copy_to_user(void *to, const void *from, size_t n)
1818+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
1819 *
1820 * Purpose:
1821 *
1822@@ -88,11 +88,11 @@
1823 .text
1824
1825 ENTRY(__copy_to_user_std)
1826-WEAK(__copy_to_user)
1827+WEAK(___copy_to_user)
1828
1829 #include "copy_template.S"
1830
1831-ENDPROC(__copy_to_user)
1832+ENDPROC(___copy_to_user)
1833 ENDPROC(__copy_to_user_std)
1834
1835 .pushsection .fixup,"ax"
1836diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1837index 025f742..8432b08 100644
1838--- a/arch/arm/lib/uaccess_with_memcpy.c
1839+++ b/arch/arm/lib/uaccess_with_memcpy.c
1840@@ -104,7 +104,7 @@ out:
1841 }
1842
1843 unsigned long
1844-__copy_to_user(void __user *to, const void *from, unsigned long n)
1845+___copy_to_user(void __user *to, const void *from, unsigned long n)
1846 {
1847 /*
1848 * This test is stubbed out of the main function above to keep
1849diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
1850index f261cd2..4ae63fb 100644
1851--- a/arch/arm/mach-kirkwood/common.c
1852+++ b/arch/arm/mach-kirkwood/common.c
1853@@ -128,7 +128,7 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
1854 clk_gate_ops.disable(hw);
1855 }
1856
1857-static struct clk_ops clk_gate_fn_ops;
1858+static clk_ops_no_const clk_gate_fn_ops;
1859
1860 static struct clk __init *clk_register_gate_fn(struct device *dev,
1861 const char *name,
1862diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1863index 2c5d0ed..7d9099c 100644
1864--- a/arch/arm/mach-omap2/board-n8x0.c
1865+++ b/arch/arm/mach-omap2/board-n8x0.c
1866@@ -594,7 +594,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1867 }
1868 #endif
1869
1870-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1871+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1872 .late_init = n8x0_menelaus_late_init,
1873 };
1874
1875diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1876index c3bd834..e81ef02 100644
1877--- a/arch/arm/mm/fault.c
1878+++ b/arch/arm/mm/fault.c
1879@@ -174,6 +174,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1880 }
1881 #endif
1882
1883+#ifdef CONFIG_PAX_PAGEEXEC
1884+ if (fsr & FSR_LNX_PF) {
1885+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1886+ do_group_exit(SIGKILL);
1887+ }
1888+#endif
1889+
1890 tsk->thread.address = addr;
1891 tsk->thread.error_code = fsr;
1892 tsk->thread.trap_no = 14;
1893@@ -397,6 +404,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1894 }
1895 #endif /* CONFIG_MMU */
1896
1897+#ifdef CONFIG_PAX_PAGEEXEC
1898+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1899+{
1900+ long i;
1901+
1902+ printk(KERN_ERR "PAX: bytes at PC: ");
1903+ for (i = 0; i < 20; i++) {
1904+ unsigned char c;
1905+ if (get_user(c, (__force unsigned char __user *)pc+i))
1906+ printk(KERN_CONT "?? ");
1907+ else
1908+ printk(KERN_CONT "%02x ", c);
1909+ }
1910+ printk("\n");
1911+
1912+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1913+ for (i = -1; i < 20; i++) {
1914+ unsigned long c;
1915+ if (get_user(c, (__force unsigned long __user *)sp+i))
1916+ printk(KERN_CONT "???????? ");
1917+ else
1918+ printk(KERN_CONT "%08lx ", c);
1919+ }
1920+ printk("\n");
1921+}
1922+#endif
1923+
1924 /*
1925 * First Level Translation Fault Handler
1926 *
1927@@ -574,6 +608,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1928 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1929 struct siginfo info;
1930
1931+#ifdef CONFIG_PAX_REFCOUNT
1932+ if (fsr_fs(ifsr) == 2) {
1933+ unsigned int bkpt;
1934+
1935+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1936+ current->thread.error_code = ifsr;
1937+ current->thread.trap_no = 0;
1938+ pax_report_refcount_overflow(regs);
1939+ fixup_exception(regs);
1940+ return;
1941+ }
1942+ }
1943+#endif
1944+
1945 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1946 return;
1947
1948diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1949index ce8cb19..3ec539d 100644
1950--- a/arch/arm/mm/mmap.c
1951+++ b/arch/arm/mm/mmap.c
1952@@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1953 if (len > TASK_SIZE)
1954 return -ENOMEM;
1955
1956+#ifdef CONFIG_PAX_RANDMMAP
1957+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1958+#endif
1959+
1960 if (addr) {
1961 if (do_align)
1962 addr = COLOUR_ALIGN(addr, pgoff);
1963@@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1964 addr = PAGE_ALIGN(addr);
1965
1966 vma = find_vma(mm, addr);
1967- if (TASK_SIZE - len >= addr &&
1968- (!vma || addr + len <= vma->vm_start))
1969+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1970 return addr;
1971 }
1972 if (len > mm->cached_hole_size) {
1973- start_addr = addr = mm->free_area_cache;
1974+ start_addr = addr = mm->free_area_cache;
1975 } else {
1976- start_addr = addr = mm->mmap_base;
1977- mm->cached_hole_size = 0;
1978+ start_addr = addr = mm->mmap_base;
1979+ mm->cached_hole_size = 0;
1980 }
1981
1982 full_search:
1983@@ -124,14 +127,14 @@ full_search:
1984 * Start a new search - just in case we missed
1985 * some holes.
1986 */
1987- if (start_addr != TASK_UNMAPPED_BASE) {
1988- start_addr = addr = TASK_UNMAPPED_BASE;
1989+ if (start_addr != mm->mmap_base) {
1990+ start_addr = addr = mm->mmap_base;
1991 mm->cached_hole_size = 0;
1992 goto full_search;
1993 }
1994 return -ENOMEM;
1995 }
1996- if (!vma || addr + len <= vma->vm_start) {
1997+ if (check_heap_stack_gap(vma, addr, len)) {
1998 /*
1999 * Remember the place where we stopped the search:
2000 */
2001@@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2002
2003 if (mmap_is_legacy()) {
2004 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
2005+
2006+#ifdef CONFIG_PAX_RANDMMAP
2007+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2008+ mm->mmap_base += mm->delta_mmap;
2009+#endif
2010+
2011 mm->get_unmapped_area = arch_get_unmapped_area;
2012 mm->unmap_area = arch_unmap_area;
2013 } else {
2014 mm->mmap_base = mmap_base(random_factor);
2015+
2016+#ifdef CONFIG_PAX_RANDMMAP
2017+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2018+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2019+#endif
2020+
2021 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2022 mm->unmap_area = arch_unmap_area_topdown;
2023 }
2024diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
2025index fd556f7..af2e7d2 100644
2026--- a/arch/arm/plat-orion/include/plat/addr-map.h
2027+++ b/arch/arm/plat-orion/include/plat/addr-map.h
2028@@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
2029 value in bridge_virt_base */
2030 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
2031 const int win);
2032-};
2033+} __no_const;
2034
2035 /*
2036 * Information needed to setup one address mapping.
2037diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
2038index 71a6827..e7fbc23 100644
2039--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
2040+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
2041@@ -43,7 +43,7 @@ struct samsung_dma_ops {
2042 int (*started)(unsigned ch);
2043 int (*flush)(unsigned ch);
2044 int (*stop)(unsigned ch);
2045-};
2046+} __no_const;
2047
2048 extern void *samsung_dmadev_get_ops(void);
2049 extern void *s3c_dma_get_ops(void);
2050diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
2051index 5f28cae..3d23723 100644
2052--- a/arch/arm/plat-samsung/include/plat/ehci.h
2053+++ b/arch/arm/plat-samsung/include/plat/ehci.h
2054@@ -14,7 +14,7 @@
2055 struct s5p_ehci_platdata {
2056 int (*phy_init)(struct platform_device *pdev, int type);
2057 int (*phy_exit)(struct platform_device *pdev, int type);
2058-};
2059+} __no_const;
2060
2061 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2062
2063diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2064index c3a58a1..78fbf54 100644
2065--- a/arch/avr32/include/asm/cache.h
2066+++ b/arch/avr32/include/asm/cache.h
2067@@ -1,8 +1,10 @@
2068 #ifndef __ASM_AVR32_CACHE_H
2069 #define __ASM_AVR32_CACHE_H
2070
2071+#include <linux/const.h>
2072+
2073 #define L1_CACHE_SHIFT 5
2074-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2075+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2076
2077 /*
2078 * Memory returned by kmalloc() may be used for DMA, so we must make
2079diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2080index 3b3159b..425ea94 100644
2081--- a/arch/avr32/include/asm/elf.h
2082+++ b/arch/avr32/include/asm/elf.h
2083@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2084 the loader. We need to make sure that it is out of the way of the program
2085 that it will "exec", and that there is sufficient room for the brk. */
2086
2087-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2088+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2089
2090+#ifdef CONFIG_PAX_ASLR
2091+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2092+
2093+#define PAX_DELTA_MMAP_LEN 15
2094+#define PAX_DELTA_STACK_LEN 15
2095+#endif
2096
2097 /* This yields a mask that user programs can use to figure out what
2098 instruction set this CPU supports. This could be done in user space,
2099diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2100index b7f5c68..556135c 100644
2101--- a/arch/avr32/include/asm/kmap_types.h
2102+++ b/arch/avr32/include/asm/kmap_types.h
2103@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2104 D(11) KM_IRQ1,
2105 D(12) KM_SOFTIRQ0,
2106 D(13) KM_SOFTIRQ1,
2107-D(14) KM_TYPE_NR
2108+D(14) KM_CLEARPAGE,
2109+D(15) KM_TYPE_NR
2110 };
2111
2112 #undef D
2113diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2114index f7040a1..db9f300 100644
2115--- a/arch/avr32/mm/fault.c
2116+++ b/arch/avr32/mm/fault.c
2117@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2118
2119 int exception_trace = 1;
2120
2121+#ifdef CONFIG_PAX_PAGEEXEC
2122+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2123+{
2124+ unsigned long i;
2125+
2126+ printk(KERN_ERR "PAX: bytes at PC: ");
2127+ for (i = 0; i < 20; i++) {
2128+ unsigned char c;
2129+ if (get_user(c, (unsigned char *)pc+i))
2130+ printk(KERN_CONT "???????? ");
2131+ else
2132+ printk(KERN_CONT "%02x ", c);
2133+ }
2134+ printk("\n");
2135+}
2136+#endif
2137+
2138 /*
2139 * This routine handles page faults. It determines the address and the
2140 * problem, and then passes it off to one of the appropriate routines.
2141@@ -156,6 +173,16 @@ bad_area:
2142 up_read(&mm->mmap_sem);
2143
2144 if (user_mode(regs)) {
2145+
2146+#ifdef CONFIG_PAX_PAGEEXEC
2147+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2148+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2149+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2150+ do_group_exit(SIGKILL);
2151+ }
2152+ }
2153+#endif
2154+
2155 if (exception_trace && printk_ratelimit())
2156 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2157 "sp %08lx ecr %lu\n",
2158diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2159index 568885a..f8008df 100644
2160--- a/arch/blackfin/include/asm/cache.h
2161+++ b/arch/blackfin/include/asm/cache.h
2162@@ -7,6 +7,7 @@
2163 #ifndef __ARCH_BLACKFIN_CACHE_H
2164 #define __ARCH_BLACKFIN_CACHE_H
2165
2166+#include <linux/const.h>
2167 #include <linux/linkage.h> /* for asmlinkage */
2168
2169 /*
2170@@ -14,7 +15,7 @@
2171 * Blackfin loads 32 bytes for cache
2172 */
2173 #define L1_CACHE_SHIFT 5
2174-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2175+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2176 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2177
2178 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2179diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2180index aea2718..3639a60 100644
2181--- a/arch/cris/include/arch-v10/arch/cache.h
2182+++ b/arch/cris/include/arch-v10/arch/cache.h
2183@@ -1,8 +1,9 @@
2184 #ifndef _ASM_ARCH_CACHE_H
2185 #define _ASM_ARCH_CACHE_H
2186
2187+#include <linux/const.h>
2188 /* Etrax 100LX have 32-byte cache-lines. */
2189-#define L1_CACHE_BYTES 32
2190 #define L1_CACHE_SHIFT 5
2191+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2192
2193 #endif /* _ASM_ARCH_CACHE_H */
2194diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2195index 7caf25d..ee65ac5 100644
2196--- a/arch/cris/include/arch-v32/arch/cache.h
2197+++ b/arch/cris/include/arch-v32/arch/cache.h
2198@@ -1,11 +1,12 @@
2199 #ifndef _ASM_CRIS_ARCH_CACHE_H
2200 #define _ASM_CRIS_ARCH_CACHE_H
2201
2202+#include <linux/const.h>
2203 #include <arch/hwregs/dma.h>
2204
2205 /* A cache-line is 32 bytes. */
2206-#define L1_CACHE_BYTES 32
2207 #define L1_CACHE_SHIFT 5
2208+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2209
2210 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
2211
2212diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2213index b86329d..6709906 100644
2214--- a/arch/frv/include/asm/atomic.h
2215+++ b/arch/frv/include/asm/atomic.h
2216@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
2217 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2218 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2219
2220+#define atomic64_read_unchecked(v) atomic64_read(v)
2221+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2222+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2223+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2224+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2225+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2226+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2227+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2228+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2229+
2230 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2231 {
2232 int c, old;
2233diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2234index 2797163..c2a401d 100644
2235--- a/arch/frv/include/asm/cache.h
2236+++ b/arch/frv/include/asm/cache.h
2237@@ -12,10 +12,11 @@
2238 #ifndef __ASM_CACHE_H
2239 #define __ASM_CACHE_H
2240
2241+#include <linux/const.h>
2242
2243 /* bytes per L1 cache line */
2244 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2245-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2246+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2247
2248 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2249 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2250diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2251index f8e16b2..c73ff79 100644
2252--- a/arch/frv/include/asm/kmap_types.h
2253+++ b/arch/frv/include/asm/kmap_types.h
2254@@ -23,6 +23,7 @@ enum km_type {
2255 KM_IRQ1,
2256 KM_SOFTIRQ0,
2257 KM_SOFTIRQ1,
2258+ KM_CLEARPAGE,
2259 KM_TYPE_NR
2260 };
2261
2262diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2263index 385fd30..6c3d97e 100644
2264--- a/arch/frv/mm/elf-fdpic.c
2265+++ b/arch/frv/mm/elf-fdpic.c
2266@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2267 if (addr) {
2268 addr = PAGE_ALIGN(addr);
2269 vma = find_vma(current->mm, addr);
2270- if (TASK_SIZE - len >= addr &&
2271- (!vma || addr + len <= vma->vm_start))
2272+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2273 goto success;
2274 }
2275
2276@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2277 for (; vma; vma = vma->vm_next) {
2278 if (addr > limit)
2279 break;
2280- if (addr + len <= vma->vm_start)
2281+ if (check_heap_stack_gap(vma, addr, len))
2282 goto success;
2283 addr = vma->vm_end;
2284 }
2285@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2286 for (; vma; vma = vma->vm_next) {
2287 if (addr > limit)
2288 break;
2289- if (addr + len <= vma->vm_start)
2290+ if (check_heap_stack_gap(vma, addr, len))
2291 goto success;
2292 addr = vma->vm_end;
2293 }
2294diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2295index c635028..6d9445a 100644
2296--- a/arch/h8300/include/asm/cache.h
2297+++ b/arch/h8300/include/asm/cache.h
2298@@ -1,8 +1,10 @@
2299 #ifndef __ARCH_H8300_CACHE_H
2300 #define __ARCH_H8300_CACHE_H
2301
2302+#include <linux/const.h>
2303+
2304 /* bytes per L1 cache line */
2305-#define L1_CACHE_BYTES 4
2306+#define L1_CACHE_BYTES _AC(4,UL)
2307
2308 /* m68k-elf-gcc 2.95.2 doesn't like these */
2309
2310diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2311index 0f01de2..d37d309 100644
2312--- a/arch/hexagon/include/asm/cache.h
2313+++ b/arch/hexagon/include/asm/cache.h
2314@@ -21,9 +21,11 @@
2315 #ifndef __ASM_CACHE_H
2316 #define __ASM_CACHE_H
2317
2318+#include <linux/const.h>
2319+
2320 /* Bytes per L1 cache line */
2321-#define L1_CACHE_SHIFT (5)
2322-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2323+#define L1_CACHE_SHIFT 5
2324+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2325
2326 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2327 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2328diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2329index 6e6fe18..a6ae668 100644
2330--- a/arch/ia64/include/asm/atomic.h
2331+++ b/arch/ia64/include/asm/atomic.h
2332@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2333 #define atomic64_inc(v) atomic64_add(1, (v))
2334 #define atomic64_dec(v) atomic64_sub(1, (v))
2335
2336+#define atomic64_read_unchecked(v) atomic64_read(v)
2337+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2338+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2339+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2340+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2341+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2342+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2343+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2344+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2345+
2346 /* Atomic operations are already serializing */
2347 #define smp_mb__before_atomic_dec() barrier()
2348 #define smp_mb__after_atomic_dec() barrier()
2349diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2350index 988254a..e1ee885 100644
2351--- a/arch/ia64/include/asm/cache.h
2352+++ b/arch/ia64/include/asm/cache.h
2353@@ -1,6 +1,7 @@
2354 #ifndef _ASM_IA64_CACHE_H
2355 #define _ASM_IA64_CACHE_H
2356
2357+#include <linux/const.h>
2358
2359 /*
2360 * Copyright (C) 1998-2000 Hewlett-Packard Co
2361@@ -9,7 +10,7 @@
2362
2363 /* Bytes per L1 (data) cache line. */
2364 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2365-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2366+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2367
2368 #ifdef CONFIG_SMP
2369 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2370diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2371index b5298eb..67c6e62 100644
2372--- a/arch/ia64/include/asm/elf.h
2373+++ b/arch/ia64/include/asm/elf.h
2374@@ -42,6 +42,13 @@
2375 */
2376 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2377
2378+#ifdef CONFIG_PAX_ASLR
2379+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2380+
2381+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2382+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2383+#endif
2384+
2385 #define PT_IA_64_UNWIND 0x70000001
2386
2387 /* IA-64 relocations: */
2388diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2389index 96a8d92..617a1cf 100644
2390--- a/arch/ia64/include/asm/pgalloc.h
2391+++ b/arch/ia64/include/asm/pgalloc.h
2392@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2393 pgd_val(*pgd_entry) = __pa(pud);
2394 }
2395
2396+static inline void
2397+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2398+{
2399+ pgd_populate(mm, pgd_entry, pud);
2400+}
2401+
2402 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2403 {
2404 return quicklist_alloc(0, GFP_KERNEL, NULL);
2405@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2406 pud_val(*pud_entry) = __pa(pmd);
2407 }
2408
2409+static inline void
2410+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2411+{
2412+ pud_populate(mm, pud_entry, pmd);
2413+}
2414+
2415 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2416 {
2417 return quicklist_alloc(0, GFP_KERNEL, NULL);
2418diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2419index 815810c..d60bd4c 100644
2420--- a/arch/ia64/include/asm/pgtable.h
2421+++ b/arch/ia64/include/asm/pgtable.h
2422@@ -12,7 +12,7 @@
2423 * David Mosberger-Tang <davidm@hpl.hp.com>
2424 */
2425
2426-
2427+#include <linux/const.h>
2428 #include <asm/mman.h>
2429 #include <asm/page.h>
2430 #include <asm/processor.h>
2431@@ -142,6 +142,17 @@
2432 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2433 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2434 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2435+
2436+#ifdef CONFIG_PAX_PAGEEXEC
2437+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2438+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2439+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2440+#else
2441+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2442+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2443+# define PAGE_COPY_NOEXEC PAGE_COPY
2444+#endif
2445+
2446 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2447 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2448 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2449diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2450index 54ff557..70c88b7 100644
2451--- a/arch/ia64/include/asm/spinlock.h
2452+++ b/arch/ia64/include/asm/spinlock.h
2453@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2454 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2455
2456 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2457- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2458+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2459 }
2460
2461 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2462diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2463index 449c8c0..432a3d2 100644
2464--- a/arch/ia64/include/asm/uaccess.h
2465+++ b/arch/ia64/include/asm/uaccess.h
2466@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2467 const void *__cu_from = (from); \
2468 long __cu_len = (n); \
2469 \
2470- if (__access_ok(__cu_to, __cu_len, get_fs())) \
2471+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2472 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2473 __cu_len; \
2474 })
2475@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2476 long __cu_len = (n); \
2477 \
2478 __chk_user_ptr(__cu_from); \
2479- if (__access_ok(__cu_from, __cu_len, get_fs())) \
2480+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2481 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2482 __cu_len; \
2483 })
2484diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2485index 24603be..948052d 100644
2486--- a/arch/ia64/kernel/module.c
2487+++ b/arch/ia64/kernel/module.c
2488@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2489 void
2490 module_free (struct module *mod, void *module_region)
2491 {
2492- if (mod && mod->arch.init_unw_table &&
2493- module_region == mod->module_init) {
2494+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2495 unw_remove_unwind_table(mod->arch.init_unw_table);
2496 mod->arch.init_unw_table = NULL;
2497 }
2498@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2499 }
2500
2501 static inline int
2502+in_init_rx (const struct module *mod, uint64_t addr)
2503+{
2504+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2505+}
2506+
2507+static inline int
2508+in_init_rw (const struct module *mod, uint64_t addr)
2509+{
2510+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2511+}
2512+
2513+static inline int
2514 in_init (const struct module *mod, uint64_t addr)
2515 {
2516- return addr - (uint64_t) mod->module_init < mod->init_size;
2517+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2518+}
2519+
2520+static inline int
2521+in_core_rx (const struct module *mod, uint64_t addr)
2522+{
2523+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2524+}
2525+
2526+static inline int
2527+in_core_rw (const struct module *mod, uint64_t addr)
2528+{
2529+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2530 }
2531
2532 static inline int
2533 in_core (const struct module *mod, uint64_t addr)
2534 {
2535- return addr - (uint64_t) mod->module_core < mod->core_size;
2536+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2537 }
2538
2539 static inline int
2540@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2541 break;
2542
2543 case RV_BDREL:
2544- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2545+ if (in_init_rx(mod, val))
2546+ val -= (uint64_t) mod->module_init_rx;
2547+ else if (in_init_rw(mod, val))
2548+ val -= (uint64_t) mod->module_init_rw;
2549+ else if (in_core_rx(mod, val))
2550+ val -= (uint64_t) mod->module_core_rx;
2551+ else if (in_core_rw(mod, val))
2552+ val -= (uint64_t) mod->module_core_rw;
2553 break;
2554
2555 case RV_LTV:
2556@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2557 * addresses have been selected...
2558 */
2559 uint64_t gp;
2560- if (mod->core_size > MAX_LTOFF)
2561+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2562 /*
2563 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2564 * at the end of the module.
2565 */
2566- gp = mod->core_size - MAX_LTOFF / 2;
2567+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2568 else
2569- gp = mod->core_size / 2;
2570- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2571+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2572+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2573 mod->arch.gp = gp;
2574 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2575 }
2576diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2577index d9439ef..b9a4303 100644
2578--- a/arch/ia64/kernel/sys_ia64.c
2579+++ b/arch/ia64/kernel/sys_ia64.c
2580@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2581 if (REGION_NUMBER(addr) == RGN_HPAGE)
2582 addr = 0;
2583 #endif
2584+
2585+#ifdef CONFIG_PAX_RANDMMAP
2586+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2587+ addr = mm->free_area_cache;
2588+ else
2589+#endif
2590+
2591 if (!addr)
2592 addr = mm->free_area_cache;
2593
2594@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2595 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2596 /* At this point: (!vma || addr < vma->vm_end). */
2597 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2598- if (start_addr != TASK_UNMAPPED_BASE) {
2599+ if (start_addr != mm->mmap_base) {
2600 /* Start a new search --- just in case we missed some holes. */
2601- addr = TASK_UNMAPPED_BASE;
2602+ addr = mm->mmap_base;
2603 goto full_search;
2604 }
2605 return -ENOMEM;
2606 }
2607- if (!vma || addr + len <= vma->vm_start) {
2608+ if (check_heap_stack_gap(vma, addr, len)) {
2609 /* Remember the address where we stopped this search: */
2610 mm->free_area_cache = addr + len;
2611 return addr;
2612diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2613index 0ccb28f..8992469 100644
2614--- a/arch/ia64/kernel/vmlinux.lds.S
2615+++ b/arch/ia64/kernel/vmlinux.lds.S
2616@@ -198,7 +198,7 @@ SECTIONS {
2617 /* Per-cpu data: */
2618 . = ALIGN(PERCPU_PAGE_SIZE);
2619 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2620- __phys_per_cpu_start = __per_cpu_load;
2621+ __phys_per_cpu_start = per_cpu_load;
2622 /*
2623 * ensure percpu data fits
2624 * into percpu page size
2625diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2626index 02d29c2..ea893df 100644
2627--- a/arch/ia64/mm/fault.c
2628+++ b/arch/ia64/mm/fault.c
2629@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2630 return pte_present(pte);
2631 }
2632
2633+#ifdef CONFIG_PAX_PAGEEXEC
2634+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2635+{
2636+ unsigned long i;
2637+
2638+ printk(KERN_ERR "PAX: bytes at PC: ");
2639+ for (i = 0; i < 8; i++) {
2640+ unsigned int c;
2641+ if (get_user(c, (unsigned int *)pc+i))
2642+ printk(KERN_CONT "???????? ");
2643+ else
2644+ printk(KERN_CONT "%08x ", c);
2645+ }
2646+ printk("\n");
2647+}
2648+#endif
2649+
2650 void __kprobes
2651 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2652 {
2653@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2654 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2655 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2656
2657- if ((vma->vm_flags & mask) != mask)
2658+ if ((vma->vm_flags & mask) != mask) {
2659+
2660+#ifdef CONFIG_PAX_PAGEEXEC
2661+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2662+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2663+ goto bad_area;
2664+
2665+ up_read(&mm->mmap_sem);
2666+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2667+ do_group_exit(SIGKILL);
2668+ }
2669+#endif
2670+
2671 goto bad_area;
2672
2673+ }
2674+
2675 /*
2676 * If for any reason at all we couldn't handle the fault, make
2677 * sure we exit gracefully rather than endlessly redo the
2678diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2679index 5ca674b..e0e1b70 100644
2680--- a/arch/ia64/mm/hugetlbpage.c
2681+++ b/arch/ia64/mm/hugetlbpage.c
2682@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2683 /* At this point: (!vmm || addr < vmm->vm_end). */
2684 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2685 return -ENOMEM;
2686- if (!vmm || (addr + len) <= vmm->vm_start)
2687+ if (check_heap_stack_gap(vmm, addr, len))
2688 return addr;
2689 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2690 }
2691diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2692index 0eab454..bd794f2 100644
2693--- a/arch/ia64/mm/init.c
2694+++ b/arch/ia64/mm/init.c
2695@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
2696 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2697 vma->vm_end = vma->vm_start + PAGE_SIZE;
2698 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2699+
2700+#ifdef CONFIG_PAX_PAGEEXEC
2701+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2702+ vma->vm_flags &= ~VM_EXEC;
2703+
2704+#ifdef CONFIG_PAX_MPROTECT
2705+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
2706+ vma->vm_flags &= ~VM_MAYEXEC;
2707+#endif
2708+
2709+ }
2710+#endif
2711+
2712 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2713 down_write(&current->mm->mmap_sem);
2714 if (insert_vm_struct(current->mm, vma)) {
2715diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2716index 40b3ee9..8c2c112 100644
2717--- a/arch/m32r/include/asm/cache.h
2718+++ b/arch/m32r/include/asm/cache.h
2719@@ -1,8 +1,10 @@
2720 #ifndef _ASM_M32R_CACHE_H
2721 #define _ASM_M32R_CACHE_H
2722
2723+#include <linux/const.h>
2724+
2725 /* L1 cache line size */
2726 #define L1_CACHE_SHIFT 4
2727-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2728+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2729
2730 #endif /* _ASM_M32R_CACHE_H */
2731diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2732index 82abd15..d95ae5d 100644
2733--- a/arch/m32r/lib/usercopy.c
2734+++ b/arch/m32r/lib/usercopy.c
2735@@ -14,6 +14,9 @@
2736 unsigned long
2737 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2738 {
2739+ if ((long)n < 0)
2740+ return n;
2741+
2742 prefetch(from);
2743 if (access_ok(VERIFY_WRITE, to, n))
2744 __copy_user(to,from,n);
2745@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2746 unsigned long
2747 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2748 {
2749+ if ((long)n < 0)
2750+ return n;
2751+
2752 prefetchw(to);
2753 if (access_ok(VERIFY_READ, from, n))
2754 __copy_user_zeroing(to,from,n);
2755diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2756index 0395c51..5f26031 100644
2757--- a/arch/m68k/include/asm/cache.h
2758+++ b/arch/m68k/include/asm/cache.h
2759@@ -4,9 +4,11 @@
2760 #ifndef __ARCH_M68K_CACHE_H
2761 #define __ARCH_M68K_CACHE_H
2762
2763+#include <linux/const.h>
2764+
2765 /* bytes per L1 cache line */
2766 #define L1_CACHE_SHIFT 4
2767-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2768+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2769
2770 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2771
2772diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2773index 4efe96a..60e8699 100644
2774--- a/arch/microblaze/include/asm/cache.h
2775+++ b/arch/microblaze/include/asm/cache.h
2776@@ -13,11 +13,12 @@
2777 #ifndef _ASM_MICROBLAZE_CACHE_H
2778 #define _ASM_MICROBLAZE_CACHE_H
2779
2780+#include <linux/const.h>
2781 #include <asm/registers.h>
2782
2783 #define L1_CACHE_SHIFT 5
2784 /* word-granular cache in microblaze */
2785-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2786+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2787
2788 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2789
2790diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2791index 3f4c5cb..3439c6e 100644
2792--- a/arch/mips/include/asm/atomic.h
2793+++ b/arch/mips/include/asm/atomic.h
2794@@ -21,6 +21,10 @@
2795 #include <asm/cmpxchg.h>
2796 #include <asm/war.h>
2797
2798+#ifdef CONFIG_GENERIC_ATOMIC64
2799+#include <asm-generic/atomic64.h>
2800+#endif
2801+
2802 #define ATOMIC_INIT(i) { (i) }
2803
2804 /*
2805@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2806 */
2807 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2808
2809+#define atomic64_read_unchecked(v) atomic64_read(v)
2810+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2811+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2812+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2813+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2814+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2815+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2816+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2817+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2818+
2819 #endif /* CONFIG_64BIT */
2820
2821 /*
2822diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2823index b4db69f..8f3b093 100644
2824--- a/arch/mips/include/asm/cache.h
2825+++ b/arch/mips/include/asm/cache.h
2826@@ -9,10 +9,11 @@
2827 #ifndef _ASM_CACHE_H
2828 #define _ASM_CACHE_H
2829
2830+#include <linux/const.h>
2831 #include <kmalloc.h>
2832
2833 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2834-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2835+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2836
2837 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2838 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2839diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2840index 455c0ac..ad65fbe 100644
2841--- a/arch/mips/include/asm/elf.h
2842+++ b/arch/mips/include/asm/elf.h
2843@@ -372,13 +372,16 @@ extern const char *__elf_platform;
2844 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2845 #endif
2846
2847+#ifdef CONFIG_PAX_ASLR
2848+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2849+
2850+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2851+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2852+#endif
2853+
2854 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2855 struct linux_binprm;
2856 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2857 int uses_interp);
2858
2859-struct mm_struct;
2860-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2861-#define arch_randomize_brk arch_randomize_brk
2862-
2863 #endif /* _ASM_ELF_H */
2864diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
2865index c1f6afa..38cc6e9 100644
2866--- a/arch/mips/include/asm/exec.h
2867+++ b/arch/mips/include/asm/exec.h
2868@@ -12,6 +12,6 @@
2869 #ifndef _ASM_EXEC_H
2870 #define _ASM_EXEC_H
2871
2872-extern unsigned long arch_align_stack(unsigned long sp);
2873+#define arch_align_stack(x) ((x) & ~0xfUL)
2874
2875 #endif /* _ASM_EXEC_H */
2876diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2877index da9bd7d..91aa7ab 100644
2878--- a/arch/mips/include/asm/page.h
2879+++ b/arch/mips/include/asm/page.h
2880@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2881 #ifdef CONFIG_CPU_MIPS32
2882 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2883 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2884- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2885+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2886 #else
2887 typedef struct { unsigned long long pte; } pte_t;
2888 #define pte_val(x) ((x).pte)
2889diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2890index 881d18b..cea38bc 100644
2891--- a/arch/mips/include/asm/pgalloc.h
2892+++ b/arch/mips/include/asm/pgalloc.h
2893@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2894 {
2895 set_pud(pud, __pud((unsigned long)pmd));
2896 }
2897+
2898+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2899+{
2900+ pud_populate(mm, pud, pmd);
2901+}
2902 #endif
2903
2904 /*
2905diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2906index ca97e0e..cd08920 100644
2907--- a/arch/mips/include/asm/thread_info.h
2908+++ b/arch/mips/include/asm/thread_info.h
2909@@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2910 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2911 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2912 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2913+/* li takes a 32bit immediate */
2914+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2915 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2916
2917 #ifdef CONFIG_MIPS32_O32
2918@@ -134,15 +136,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2919 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2920 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2921 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2922+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2923+
2924+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2925
2926 /* work to do in syscall_trace_leave() */
2927-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2928+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2929
2930 /* work to do on interrupt/exception return */
2931 #define _TIF_WORK_MASK (0x0000ffef & \
2932 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2933 /* work to do on any return to u-space */
2934-#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2935+#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2936
2937 #endif /* __KERNEL__ */
2938
2939diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2940index 9fdd8bc..4bd7f1a 100644
2941--- a/arch/mips/kernel/binfmt_elfn32.c
2942+++ b/arch/mips/kernel/binfmt_elfn32.c
2943@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2944 #undef ELF_ET_DYN_BASE
2945 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2946
2947+#ifdef CONFIG_PAX_ASLR
2948+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2949+
2950+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2951+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2952+#endif
2953+
2954 #include <asm/processor.h>
2955 #include <linux/module.h>
2956 #include <linux/elfcore.h>
2957diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2958index ff44823..97f8906 100644
2959--- a/arch/mips/kernel/binfmt_elfo32.c
2960+++ b/arch/mips/kernel/binfmt_elfo32.c
2961@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2962 #undef ELF_ET_DYN_BASE
2963 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2964
2965+#ifdef CONFIG_PAX_ASLR
2966+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2967+
2968+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2969+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2970+#endif
2971+
2972 #include <asm/processor.h>
2973
2974 /*
2975diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2976index e9a5fd7..378809a 100644
2977--- a/arch/mips/kernel/process.c
2978+++ b/arch/mips/kernel/process.c
2979@@ -480,15 +480,3 @@ unsigned long get_wchan(struct task_struct *task)
2980 out:
2981 return pc;
2982 }
2983-
2984-/*
2985- * Don't forget that the stack pointer must be aligned on a 8 bytes
2986- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2987- */
2988-unsigned long arch_align_stack(unsigned long sp)
2989-{
2990- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2991- sp -= get_random_int() & ~PAGE_MASK;
2992-
2993- return sp & ALMASK;
2994-}
2995diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
2996index 4812c6d..2069554 100644
2997--- a/arch/mips/kernel/ptrace.c
2998+++ b/arch/mips/kernel/ptrace.c
2999@@ -528,6 +528,10 @@ static inline int audit_arch(void)
3000 return arch;
3001 }
3002
3003+#ifdef CONFIG_GRKERNSEC_SETXID
3004+extern void gr_delayed_cred_worker(void);
3005+#endif
3006+
3007 /*
3008 * Notification of system call entry/exit
3009 * - triggered by current->work.syscall_trace
3010@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
3011 /* do the secure computing check first */
3012 secure_computing_strict(regs->regs[2]);
3013
3014+#ifdef CONFIG_GRKERNSEC_SETXID
3015+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3016+ gr_delayed_cred_worker();
3017+#endif
3018+
3019 if (!(current->ptrace & PT_PTRACED))
3020 goto out;
3021
3022diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
3023index a632bc1..0b77c7c 100644
3024--- a/arch/mips/kernel/scall32-o32.S
3025+++ b/arch/mips/kernel/scall32-o32.S
3026@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3027
3028 stack_done:
3029 lw t0, TI_FLAGS($28) # syscall tracing enabled?
3030- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3031+ li t1, _TIF_SYSCALL_WORK
3032 and t0, t1
3033 bnez t0, syscall_trace_entry # -> yes
3034
3035diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
3036index 3b5a5e9..e1ee86d 100644
3037--- a/arch/mips/kernel/scall64-64.S
3038+++ b/arch/mips/kernel/scall64-64.S
3039@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
3040
3041 sd a3, PT_R26(sp) # save a3 for syscall restarting
3042
3043- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3044+ li t1, _TIF_SYSCALL_WORK
3045 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3046 and t0, t1, t0
3047 bnez t0, syscall_trace_entry
3048diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
3049index 6be6f70..1859577 100644
3050--- a/arch/mips/kernel/scall64-n32.S
3051+++ b/arch/mips/kernel/scall64-n32.S
3052@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3053
3054 sd a3, PT_R26(sp) # save a3 for syscall restarting
3055
3056- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3057+ li t1, _TIF_SYSCALL_WORK
3058 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3059 and t0, t1, t0
3060 bnez t0, n32_syscall_trace_entry
3061diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3062index 5422855..74e63a3 100644
3063--- a/arch/mips/kernel/scall64-o32.S
3064+++ b/arch/mips/kernel/scall64-o32.S
3065@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3066 PTR 4b, bad_stack
3067 .previous
3068
3069- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3070+ li t1, _TIF_SYSCALL_WORK
3071 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3072 and t0, t1, t0
3073 bnez t0, trace_a_syscall
3074diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3075index c14f6df..537e729 100644
3076--- a/arch/mips/mm/fault.c
3077+++ b/arch/mips/mm/fault.c
3078@@ -27,6 +27,23 @@
3079 #include <asm/highmem.h> /* For VMALLOC_END */
3080 #include <linux/kdebug.h>
3081
3082+#ifdef CONFIG_PAX_PAGEEXEC
3083+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3084+{
3085+ unsigned long i;
3086+
3087+ printk(KERN_ERR "PAX: bytes at PC: ");
3088+ for (i = 0; i < 5; i++) {
3089+ unsigned int c;
3090+ if (get_user(c, (unsigned int *)pc+i))
3091+ printk(KERN_CONT "???????? ");
3092+ else
3093+ printk(KERN_CONT "%08x ", c);
3094+ }
3095+ printk("\n");
3096+}
3097+#endif
3098+
3099 /*
3100 * This routine handles page faults. It determines the address,
3101 * and the problem, and then passes it off to one of the appropriate
3102diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3103index 302d779..7d35bf8 100644
3104--- a/arch/mips/mm/mmap.c
3105+++ b/arch/mips/mm/mmap.c
3106@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3107 do_color_align = 1;
3108
3109 /* requesting a specific address */
3110+
3111+#ifdef CONFIG_PAX_RANDMMAP
3112+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3113+#endif
3114+
3115 if (addr) {
3116 if (do_color_align)
3117 addr = COLOUR_ALIGN(addr, pgoff);
3118@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3119 addr = PAGE_ALIGN(addr);
3120
3121 vma = find_vma(mm, addr);
3122- if (TASK_SIZE - len >= addr &&
3123- (!vma || addr + len <= vma->vm_start))
3124+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
3125 return addr;
3126 }
3127
3128@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3129 /* At this point: (!vma || addr < vma->vm_end). */
3130 if (TASK_SIZE - len < addr)
3131 return -ENOMEM;
3132- if (!vma || addr + len <= vma->vm_start)
3133+ if (check_heap_stack_gap(vmm, addr, len))
3134 return addr;
3135 addr = vma->vm_end;
3136 if (do_color_align)
3137@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3138 /* make sure it can fit in the remaining address space */
3139 if (likely(addr > len)) {
3140 vma = find_vma(mm, addr - len);
3141- if (!vma || addr <= vma->vm_start) {
3142+ if (check_heap_stack_gap(vmm, addr - len, len))
3143 /* cache the address as a hint for next time */
3144 return mm->free_area_cache = addr - len;
3145 }
3146@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3147 * return with success:
3148 */
3149 vma = find_vma(mm, addr);
3150- if (likely(!vma || addr + len <= vma->vm_start)) {
3151+ if (check_heap_stack_gap(vmm, addr, len)) {
3152 /* cache the address as a hint for next time */
3153 return mm->free_area_cache = addr;
3154 }
3155@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3156 mm->unmap_area = arch_unmap_area_topdown;
3157 }
3158 }
3159-
3160-static inline unsigned long brk_rnd(void)
3161-{
3162- unsigned long rnd = get_random_int();
3163-
3164- rnd = rnd << PAGE_SHIFT;
3165- /* 8MB for 32bit, 256MB for 64bit */
3166- if (TASK_IS_32BIT_ADDR)
3167- rnd = rnd & 0x7ffffful;
3168- else
3169- rnd = rnd & 0xffffffful;
3170-
3171- return rnd;
3172-}
3173-
3174-unsigned long arch_randomize_brk(struct mm_struct *mm)
3175-{
3176- unsigned long base = mm->brk;
3177- unsigned long ret;
3178-
3179- ret = PAGE_ALIGN(base + brk_rnd());
3180-
3181- if (ret < mm->brk)
3182- return mm->brk;
3183-
3184- return ret;
3185-}
3186diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3187index 967d144..db12197 100644
3188--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3189+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3190@@ -11,12 +11,14 @@
3191 #ifndef _ASM_PROC_CACHE_H
3192 #define _ASM_PROC_CACHE_H
3193
3194+#include <linux/const.h>
3195+
3196 /* L1 cache */
3197
3198 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3199 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3200-#define L1_CACHE_BYTES 16 /* bytes per entry */
3201 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3202+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3203 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3204
3205 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3206diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3207index bcb5df2..84fabd2 100644
3208--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3209+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3210@@ -16,13 +16,15 @@
3211 #ifndef _ASM_PROC_CACHE_H
3212 #define _ASM_PROC_CACHE_H
3213
3214+#include <linux/const.h>
3215+
3216 /*
3217 * L1 cache
3218 */
3219 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3220 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3221-#define L1_CACHE_BYTES 32 /* bytes per entry */
3222 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3223+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3224 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3225
3226 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3227diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3228index 4ce7a01..449202a 100644
3229--- a/arch/openrisc/include/asm/cache.h
3230+++ b/arch/openrisc/include/asm/cache.h
3231@@ -19,11 +19,13 @@
3232 #ifndef __ASM_OPENRISC_CACHE_H
3233 #define __ASM_OPENRISC_CACHE_H
3234
3235+#include <linux/const.h>
3236+
3237 /* FIXME: How can we replace these with values from the CPU...
3238 * they shouldn't be hard-coded!
3239 */
3240
3241-#define L1_CACHE_BYTES 16
3242 #define L1_CACHE_SHIFT 4
3243+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3244
3245 #endif /* __ASM_OPENRISC_CACHE_H */
3246diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3247index af9cf30..2aae9b2 100644
3248--- a/arch/parisc/include/asm/atomic.h
3249+++ b/arch/parisc/include/asm/atomic.h
3250@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3251
3252 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3253
3254+#define atomic64_read_unchecked(v) atomic64_read(v)
3255+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3256+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3257+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3258+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3259+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3260+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3261+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3262+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3263+
3264 #endif /* !CONFIG_64BIT */
3265
3266
3267diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3268index 47f11c7..3420df2 100644
3269--- a/arch/parisc/include/asm/cache.h
3270+++ b/arch/parisc/include/asm/cache.h
3271@@ -5,6 +5,7 @@
3272 #ifndef __ARCH_PARISC_CACHE_H
3273 #define __ARCH_PARISC_CACHE_H
3274
3275+#include <linux/const.h>
3276
3277 /*
3278 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3279@@ -15,13 +16,13 @@
3280 * just ruin performance.
3281 */
3282 #ifdef CONFIG_PA20
3283-#define L1_CACHE_BYTES 64
3284 #define L1_CACHE_SHIFT 6
3285 #else
3286-#define L1_CACHE_BYTES 32
3287 #define L1_CACHE_SHIFT 5
3288 #endif
3289
3290+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3291+
3292 #ifndef __ASSEMBLY__
3293
3294 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3295diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3296index 19f6cb1..6c78cf2 100644
3297--- a/arch/parisc/include/asm/elf.h
3298+++ b/arch/parisc/include/asm/elf.h
3299@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3300
3301 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3302
3303+#ifdef CONFIG_PAX_ASLR
3304+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3305+
3306+#define PAX_DELTA_MMAP_LEN 16
3307+#define PAX_DELTA_STACK_LEN 16
3308+#endif
3309+
3310 /* This yields a mask that user programs can use to figure out what
3311 instruction set this CPU supports. This could be done in user space,
3312 but it's not easy, and we've already done it here. */
3313diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3314index fc987a1..6e068ef 100644
3315--- a/arch/parisc/include/asm/pgalloc.h
3316+++ b/arch/parisc/include/asm/pgalloc.h
3317@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3318 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3319 }
3320
3321+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3322+{
3323+ pgd_populate(mm, pgd, pmd);
3324+}
3325+
3326 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3327 {
3328 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3329@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3330 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3331 #define pmd_free(mm, x) do { } while (0)
3332 #define pgd_populate(mm, pmd, pte) BUG()
3333+#define pgd_populate_kernel(mm, pmd, pte) BUG()
3334
3335 #endif
3336
3337diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3338index ee99f23..802b0a1 100644
3339--- a/arch/parisc/include/asm/pgtable.h
3340+++ b/arch/parisc/include/asm/pgtable.h
3341@@ -212,6 +212,17 @@ struct vm_area_struct;
3342 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3343 #define PAGE_COPY PAGE_EXECREAD
3344 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3345+
3346+#ifdef CONFIG_PAX_PAGEEXEC
3347+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3348+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3349+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3350+#else
3351+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3352+# define PAGE_COPY_NOEXEC PAGE_COPY
3353+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3354+#endif
3355+
3356 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3357 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3358 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3359diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
3360index 4ba2c93..f5e3974 100644
3361--- a/arch/parisc/include/asm/uaccess.h
3362+++ b/arch/parisc/include/asm/uaccess.h
3363@@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
3364 const void __user *from,
3365 unsigned long n)
3366 {
3367- int sz = __compiletime_object_size(to);
3368+ size_t sz = __compiletime_object_size(to);
3369 int ret = -EFAULT;
3370
3371- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
3372+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
3373 ret = __copy_from_user(to, from, n);
3374 else
3375 copy_from_user_overflow();
3376diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3377index 5e34ccf..672bc9c 100644
3378--- a/arch/parisc/kernel/module.c
3379+++ b/arch/parisc/kernel/module.c
3380@@ -98,16 +98,38 @@
3381
3382 /* three functions to determine where in the module core
3383 * or init pieces the location is */
3384+static inline int in_init_rx(struct module *me, void *loc)
3385+{
3386+ return (loc >= me->module_init_rx &&
3387+ loc < (me->module_init_rx + me->init_size_rx));
3388+}
3389+
3390+static inline int in_init_rw(struct module *me, void *loc)
3391+{
3392+ return (loc >= me->module_init_rw &&
3393+ loc < (me->module_init_rw + me->init_size_rw));
3394+}
3395+
3396 static inline int in_init(struct module *me, void *loc)
3397 {
3398- return (loc >= me->module_init &&
3399- loc <= (me->module_init + me->init_size));
3400+ return in_init_rx(me, loc) || in_init_rw(me, loc);
3401+}
3402+
3403+static inline int in_core_rx(struct module *me, void *loc)
3404+{
3405+ return (loc >= me->module_core_rx &&
3406+ loc < (me->module_core_rx + me->core_size_rx));
3407+}
3408+
3409+static inline int in_core_rw(struct module *me, void *loc)
3410+{
3411+ return (loc >= me->module_core_rw &&
3412+ loc < (me->module_core_rw + me->core_size_rw));
3413 }
3414
3415 static inline int in_core(struct module *me, void *loc)
3416 {
3417- return (loc >= me->module_core &&
3418- loc <= (me->module_core + me->core_size));
3419+ return in_core_rx(me, loc) || in_core_rw(me, loc);
3420 }
3421
3422 static inline int in_local(struct module *me, void *loc)
3423@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3424 }
3425
3426 /* align things a bit */
3427- me->core_size = ALIGN(me->core_size, 16);
3428- me->arch.got_offset = me->core_size;
3429- me->core_size += gots * sizeof(struct got_entry);
3430+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3431+ me->arch.got_offset = me->core_size_rw;
3432+ me->core_size_rw += gots * sizeof(struct got_entry);
3433
3434- me->core_size = ALIGN(me->core_size, 16);
3435- me->arch.fdesc_offset = me->core_size;
3436- me->core_size += fdescs * sizeof(Elf_Fdesc);
3437+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3438+ me->arch.fdesc_offset = me->core_size_rw;
3439+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3440
3441 me->arch.got_max = gots;
3442 me->arch.fdesc_max = fdescs;
3443@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3444
3445 BUG_ON(value == 0);
3446
3447- got = me->module_core + me->arch.got_offset;
3448+ got = me->module_core_rw + me->arch.got_offset;
3449 for (i = 0; got[i].addr; i++)
3450 if (got[i].addr == value)
3451 goto out;
3452@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3453 #ifdef CONFIG_64BIT
3454 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3455 {
3456- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3457+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3458
3459 if (!value) {
3460 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3461@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3462
3463 /* Create new one */
3464 fdesc->addr = value;
3465- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3466+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3467 return (Elf_Addr)fdesc;
3468 }
3469 #endif /* CONFIG_64BIT */
3470@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3471
3472 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3473 end = table + sechdrs[me->arch.unwind_section].sh_size;
3474- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3475+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3476
3477 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3478 me->arch.unwind_section, table, end, gp);
3479diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3480index c9b9322..02d8940 100644
3481--- a/arch/parisc/kernel/sys_parisc.c
3482+++ b/arch/parisc/kernel/sys_parisc.c
3483@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3484 /* At this point: (!vma || addr < vma->vm_end). */
3485 if (TASK_SIZE - len < addr)
3486 return -ENOMEM;
3487- if (!vma || addr + len <= vma->vm_start)
3488+ if (check_heap_stack_gap(vma, addr, len))
3489 return addr;
3490 addr = vma->vm_end;
3491 }
3492@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3493 /* At this point: (!vma || addr < vma->vm_end). */
3494 if (TASK_SIZE - len < addr)
3495 return -ENOMEM;
3496- if (!vma || addr + len <= vma->vm_start)
3497+ if (check_heap_stack_gap(vma, addr, len))
3498 return addr;
3499 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3500 if (addr < vma->vm_end) /* handle wraparound */
3501@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3502 if (flags & MAP_FIXED)
3503 return addr;
3504 if (!addr)
3505- addr = TASK_UNMAPPED_BASE;
3506+ addr = current->mm->mmap_base;
3507
3508 if (filp) {
3509 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3510diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3511index 45ba99f..8e22c33 100644
3512--- a/arch/parisc/kernel/traps.c
3513+++ b/arch/parisc/kernel/traps.c
3514@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3515
3516 down_read(&current->mm->mmap_sem);
3517 vma = find_vma(current->mm,regs->iaoq[0]);
3518- if (vma && (regs->iaoq[0] >= vma->vm_start)
3519- && (vma->vm_flags & VM_EXEC)) {
3520-
3521+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3522 fault_address = regs->iaoq[0];
3523 fault_space = regs->iasq[0];
3524
3525diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3526index 18162ce..94de376 100644
3527--- a/arch/parisc/mm/fault.c
3528+++ b/arch/parisc/mm/fault.c
3529@@ -15,6 +15,7 @@
3530 #include <linux/sched.h>
3531 #include <linux/interrupt.h>
3532 #include <linux/module.h>
3533+#include <linux/unistd.h>
3534
3535 #include <asm/uaccess.h>
3536 #include <asm/traps.h>
3537@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3538 static unsigned long
3539 parisc_acctyp(unsigned long code, unsigned int inst)
3540 {
3541- if (code == 6 || code == 16)
3542+ if (code == 6 || code == 7 || code == 16)
3543 return VM_EXEC;
3544
3545 switch (inst & 0xf0000000) {
3546@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3547 }
3548 #endif
3549
3550+#ifdef CONFIG_PAX_PAGEEXEC
3551+/*
3552+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3553+ *
3554+ * returns 1 when task should be killed
3555+ * 2 when rt_sigreturn trampoline was detected
3556+ * 3 when unpatched PLT trampoline was detected
3557+ */
3558+static int pax_handle_fetch_fault(struct pt_regs *regs)
3559+{
3560+
3561+#ifdef CONFIG_PAX_EMUPLT
3562+ int err;
3563+
3564+ do { /* PaX: unpatched PLT emulation */
3565+ unsigned int bl, depwi;
3566+
3567+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3568+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3569+
3570+ if (err)
3571+ break;
3572+
3573+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3574+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3575+
3576+ err = get_user(ldw, (unsigned int *)addr);
3577+ err |= get_user(bv, (unsigned int *)(addr+4));
3578+ err |= get_user(ldw2, (unsigned int *)(addr+8));
3579+
3580+ if (err)
3581+ break;
3582+
3583+ if (ldw == 0x0E801096U &&
3584+ bv == 0xEAC0C000U &&
3585+ ldw2 == 0x0E881095U)
3586+ {
3587+ unsigned int resolver, map;
3588+
3589+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3590+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3591+ if (err)
3592+ break;
3593+
3594+ regs->gr[20] = instruction_pointer(regs)+8;
3595+ regs->gr[21] = map;
3596+ regs->gr[22] = resolver;
3597+ regs->iaoq[0] = resolver | 3UL;
3598+ regs->iaoq[1] = regs->iaoq[0] + 4;
3599+ return 3;
3600+ }
3601+ }
3602+ } while (0);
3603+#endif
3604+
3605+#ifdef CONFIG_PAX_EMUTRAMP
3606+
3607+#ifndef CONFIG_PAX_EMUSIGRT
3608+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3609+ return 1;
3610+#endif
3611+
3612+ do { /* PaX: rt_sigreturn emulation */
3613+ unsigned int ldi1, ldi2, bel, nop;
3614+
3615+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3616+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3617+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3618+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3619+
3620+ if (err)
3621+ break;
3622+
3623+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3624+ ldi2 == 0x3414015AU &&
3625+ bel == 0xE4008200U &&
3626+ nop == 0x08000240U)
3627+ {
3628+ regs->gr[25] = (ldi1 & 2) >> 1;
3629+ regs->gr[20] = __NR_rt_sigreturn;
3630+ regs->gr[31] = regs->iaoq[1] + 16;
3631+ regs->sr[0] = regs->iasq[1];
3632+ regs->iaoq[0] = 0x100UL;
3633+ regs->iaoq[1] = regs->iaoq[0] + 4;
3634+ regs->iasq[0] = regs->sr[2];
3635+ regs->iasq[1] = regs->sr[2];
3636+ return 2;
3637+ }
3638+ } while (0);
3639+#endif
3640+
3641+ return 1;
3642+}
3643+
3644+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3645+{
3646+ unsigned long i;
3647+
3648+ printk(KERN_ERR "PAX: bytes at PC: ");
3649+ for (i = 0; i < 5; i++) {
3650+ unsigned int c;
3651+ if (get_user(c, (unsigned int *)pc+i))
3652+ printk(KERN_CONT "???????? ");
3653+ else
3654+ printk(KERN_CONT "%08x ", c);
3655+ }
3656+ printk("\n");
3657+}
3658+#endif
3659+
3660 int fixup_exception(struct pt_regs *regs)
3661 {
3662 const struct exception_table_entry *fix;
3663@@ -192,8 +303,33 @@ good_area:
3664
3665 acc_type = parisc_acctyp(code,regs->iir);
3666
3667- if ((vma->vm_flags & acc_type) != acc_type)
3668+ if ((vma->vm_flags & acc_type) != acc_type) {
3669+
3670+#ifdef CONFIG_PAX_PAGEEXEC
3671+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3672+ (address & ~3UL) == instruction_pointer(regs))
3673+ {
3674+ up_read(&mm->mmap_sem);
3675+ switch (pax_handle_fetch_fault(regs)) {
3676+
3677+#ifdef CONFIG_PAX_EMUPLT
3678+ case 3:
3679+ return;
3680+#endif
3681+
3682+#ifdef CONFIG_PAX_EMUTRAMP
3683+ case 2:
3684+ return;
3685+#endif
3686+
3687+ }
3688+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3689+ do_group_exit(SIGKILL);
3690+ }
3691+#endif
3692+
3693 goto bad_area;
3694+ }
3695
3696 /*
3697 * If for any reason at all we couldn't handle the fault, make
3698diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3699index da29032..f76c24c 100644
3700--- a/arch/powerpc/include/asm/atomic.h
3701+++ b/arch/powerpc/include/asm/atomic.h
3702@@ -522,6 +522,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
3703 return t1;
3704 }
3705
3706+#define atomic64_read_unchecked(v) atomic64_read(v)
3707+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3708+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3709+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3710+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3711+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3712+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3713+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3714+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3715+
3716 #endif /* __powerpc64__ */
3717
3718 #endif /* __KERNEL__ */
3719diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3720index 9e495c9..b6878e5 100644
3721--- a/arch/powerpc/include/asm/cache.h
3722+++ b/arch/powerpc/include/asm/cache.h
3723@@ -3,6 +3,7 @@
3724
3725 #ifdef __KERNEL__
3726
3727+#include <linux/const.h>
3728
3729 /* bytes per L1 cache line */
3730 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3731@@ -22,7 +23,7 @@
3732 #define L1_CACHE_SHIFT 7
3733 #endif
3734
3735-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3736+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3737
3738 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3739
3740diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3741index 3bf9cca..e7457d0 100644
3742--- a/arch/powerpc/include/asm/elf.h
3743+++ b/arch/powerpc/include/asm/elf.h
3744@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3745 the loader. We need to make sure that it is out of the way of the program
3746 that it will "exec", and that there is sufficient room for the brk. */
3747
3748-extern unsigned long randomize_et_dyn(unsigned long base);
3749-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3750+#define ELF_ET_DYN_BASE (0x20000000)
3751+
3752+#ifdef CONFIG_PAX_ASLR
3753+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3754+
3755+#ifdef __powerpc64__
3756+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3757+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3758+#else
3759+#define PAX_DELTA_MMAP_LEN 15
3760+#define PAX_DELTA_STACK_LEN 15
3761+#endif
3762+#endif
3763
3764 /*
3765 * Our registers are always unsigned longs, whether we're a 32 bit
3766@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3767 (0x7ff >> (PAGE_SHIFT - 12)) : \
3768 (0x3ffff >> (PAGE_SHIFT - 12)))
3769
3770-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3771-#define arch_randomize_brk arch_randomize_brk
3772-
3773 #endif /* __KERNEL__ */
3774
3775 /*
3776diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
3777index 8196e9c..d83a9f3 100644
3778--- a/arch/powerpc/include/asm/exec.h
3779+++ b/arch/powerpc/include/asm/exec.h
3780@@ -4,6 +4,6 @@
3781 #ifndef _ASM_POWERPC_EXEC_H
3782 #define _ASM_POWERPC_EXEC_H
3783
3784-extern unsigned long arch_align_stack(unsigned long sp);
3785+#define arch_align_stack(x) ((x) & ~0xfUL)
3786
3787 #endif /* _ASM_POWERPC_EXEC_H */
3788diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3789index bca8fdc..61e9580 100644
3790--- a/arch/powerpc/include/asm/kmap_types.h
3791+++ b/arch/powerpc/include/asm/kmap_types.h
3792@@ -27,6 +27,7 @@ enum km_type {
3793 KM_PPC_SYNC_PAGE,
3794 KM_PPC_SYNC_ICACHE,
3795 KM_KDB,
3796+ KM_CLEARPAGE,
3797 KM_TYPE_NR
3798 };
3799
3800diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3801index d4a7f64..451de1c 100644
3802--- a/arch/powerpc/include/asm/mman.h
3803+++ b/arch/powerpc/include/asm/mman.h
3804@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3805 }
3806 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3807
3808-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3809+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3810 {
3811 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3812 }
3813diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3814index f072e97..b436dee 100644
3815--- a/arch/powerpc/include/asm/page.h
3816+++ b/arch/powerpc/include/asm/page.h
3817@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3818 * and needs to be executable. This means the whole heap ends
3819 * up being executable.
3820 */
3821-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3822- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3823+#define VM_DATA_DEFAULT_FLAGS32 \
3824+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3825+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3826
3827 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3828 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3829@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3830 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3831 #endif
3832
3833+#define ktla_ktva(addr) (addr)
3834+#define ktva_ktla(addr) (addr)
3835+
3836 /*
3837 * Use the top bit of the higher-level page table entries to indicate whether
3838 * the entries we point to contain hugepages. This works because we know that
3839diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3840index fed85e6..da5c71b 100644
3841--- a/arch/powerpc/include/asm/page_64.h
3842+++ b/arch/powerpc/include/asm/page_64.h
3843@@ -146,15 +146,18 @@ do { \
3844 * stack by default, so in the absence of a PT_GNU_STACK program header
3845 * we turn execute permission off.
3846 */
3847-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3848- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3849+#define VM_STACK_DEFAULT_FLAGS32 \
3850+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3851+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3852
3853 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3854 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3855
3856+#ifndef CONFIG_PAX_PAGEEXEC
3857 #define VM_STACK_DEFAULT_FLAGS \
3858 (is_32bit_task() ? \
3859 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3860+#endif
3861
3862 #include <asm-generic/getorder.h>
3863
3864diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3865index 292725c..f87ae14 100644
3866--- a/arch/powerpc/include/asm/pgalloc-64.h
3867+++ b/arch/powerpc/include/asm/pgalloc-64.h
3868@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3869 #ifndef CONFIG_PPC_64K_PAGES
3870
3871 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3872+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3873
3874 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3875 {
3876@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3877 pud_set(pud, (unsigned long)pmd);
3878 }
3879
3880+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3881+{
3882+ pud_populate(mm, pud, pmd);
3883+}
3884+
3885 #define pmd_populate(mm, pmd, pte_page) \
3886 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3887 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3888@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3889 #else /* CONFIG_PPC_64K_PAGES */
3890
3891 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3892+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3893
3894 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3895 pte_t *pte)
3896diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3897index 2e0e411..7899c68 100644
3898--- a/arch/powerpc/include/asm/pgtable.h
3899+++ b/arch/powerpc/include/asm/pgtable.h
3900@@ -2,6 +2,7 @@
3901 #define _ASM_POWERPC_PGTABLE_H
3902 #ifdef __KERNEL__
3903
3904+#include <linux/const.h>
3905 #ifndef __ASSEMBLY__
3906 #include <asm/processor.h> /* For TASK_SIZE */
3907 #include <asm/mmu.h>
3908diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3909index 4aad413..85d86bf 100644
3910--- a/arch/powerpc/include/asm/pte-hash32.h
3911+++ b/arch/powerpc/include/asm/pte-hash32.h
3912@@ -21,6 +21,7 @@
3913 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3914 #define _PAGE_USER 0x004 /* usermode access allowed */
3915 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3916+#define _PAGE_EXEC _PAGE_GUARDED
3917 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3918 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3919 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3920diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3921index 360585d..c3930ef 100644
3922--- a/arch/powerpc/include/asm/reg.h
3923+++ b/arch/powerpc/include/asm/reg.h
3924@@ -212,6 +212,7 @@
3925 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3926 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3927 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3928+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3929 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3930 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3931 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3932diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
3933index 68831e9..379c695 100644
3934--- a/arch/powerpc/include/asm/thread_info.h
3935+++ b/arch/powerpc/include/asm/thread_info.h
3936@@ -91,12 +91,14 @@ static inline struct thread_info *current_thread_info(void)
3937 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3938 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3939 #define TIF_SINGLESTEP 8 /* singlestepping active */
3940-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3941 #define TIF_SECCOMP 10 /* secure computing */
3942 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3943 #define TIF_NOERROR 12 /* Force successful syscall return */
3944 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3945 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
3946+#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
3947+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
3948+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3949
3950 /* as above, but as bit values */
3951 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3952@@ -113,8 +115,10 @@ static inline struct thread_info *current_thread_info(void)
3953 #define _TIF_NOERROR (1<<TIF_NOERROR)
3954 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3955 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3956+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3957 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3958- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
3959+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
3960+ _TIF_GRSEC_SETXID)
3961
3962 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3963 _TIF_NOTIFY_RESUME)
3964diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3965index 17bb40c..353c98b 100644
3966--- a/arch/powerpc/include/asm/uaccess.h
3967+++ b/arch/powerpc/include/asm/uaccess.h
3968@@ -13,6 +13,8 @@
3969 #define VERIFY_READ 0
3970 #define VERIFY_WRITE 1
3971
3972+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3973+
3974 /*
3975 * The fs value determines whether argument validity checking should be
3976 * performed or not. If get_fs() == USER_DS, checking is performed, with
3977@@ -329,52 +331,6 @@ do { \
3978 extern unsigned long __copy_tofrom_user(void __user *to,
3979 const void __user *from, unsigned long size);
3980
3981-#ifndef __powerpc64__
3982-
3983-static inline unsigned long copy_from_user(void *to,
3984- const void __user *from, unsigned long n)
3985-{
3986- unsigned long over;
3987-
3988- if (access_ok(VERIFY_READ, from, n))
3989- return __copy_tofrom_user((__force void __user *)to, from, n);
3990- if ((unsigned long)from < TASK_SIZE) {
3991- over = (unsigned long)from + n - TASK_SIZE;
3992- return __copy_tofrom_user((__force void __user *)to, from,
3993- n - over) + over;
3994- }
3995- return n;
3996-}
3997-
3998-static inline unsigned long copy_to_user(void __user *to,
3999- const void *from, unsigned long n)
4000-{
4001- unsigned long over;
4002-
4003- if (access_ok(VERIFY_WRITE, to, n))
4004- return __copy_tofrom_user(to, (__force void __user *)from, n);
4005- if ((unsigned long)to < TASK_SIZE) {
4006- over = (unsigned long)to + n - TASK_SIZE;
4007- return __copy_tofrom_user(to, (__force void __user *)from,
4008- n - over) + over;
4009- }
4010- return n;
4011-}
4012-
4013-#else /* __powerpc64__ */
4014-
4015-#define __copy_in_user(to, from, size) \
4016- __copy_tofrom_user((to), (from), (size))
4017-
4018-extern unsigned long copy_from_user(void *to, const void __user *from,
4019- unsigned long n);
4020-extern unsigned long copy_to_user(void __user *to, const void *from,
4021- unsigned long n);
4022-extern unsigned long copy_in_user(void __user *to, const void __user *from,
4023- unsigned long n);
4024-
4025-#endif /* __powerpc64__ */
4026-
4027 static inline unsigned long __copy_from_user_inatomic(void *to,
4028 const void __user *from, unsigned long n)
4029 {
4030@@ -398,6 +354,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
4031 if (ret == 0)
4032 return 0;
4033 }
4034+
4035+ if (!__builtin_constant_p(n))
4036+ check_object_size(to, n, false);
4037+
4038 return __copy_tofrom_user((__force void __user *)to, from, n);
4039 }
4040
4041@@ -424,6 +384,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
4042 if (ret == 0)
4043 return 0;
4044 }
4045+
4046+ if (!__builtin_constant_p(n))
4047+ check_object_size(from, n, true);
4048+
4049 return __copy_tofrom_user(to, (__force const void __user *)from, n);
4050 }
4051
4052@@ -441,6 +405,92 @@ static inline unsigned long __copy_to_user(void __user *to,
4053 return __copy_to_user_inatomic(to, from, size);
4054 }
4055
4056+#ifndef __powerpc64__
4057+
4058+static inline unsigned long __must_check copy_from_user(void *to,
4059+ const void __user *from, unsigned long n)
4060+{
4061+ unsigned long over;
4062+
4063+ if ((long)n < 0)
4064+ return n;
4065+
4066+ if (access_ok(VERIFY_READ, from, n)) {
4067+ if (!__builtin_constant_p(n))
4068+ check_object_size(to, n, false);
4069+ return __copy_tofrom_user((__force void __user *)to, from, n);
4070+ }
4071+ if ((unsigned long)from < TASK_SIZE) {
4072+ over = (unsigned long)from + n - TASK_SIZE;
4073+ if (!__builtin_constant_p(n - over))
4074+ check_object_size(to, n - over, false);
4075+ return __copy_tofrom_user((__force void __user *)to, from,
4076+ n - over) + over;
4077+ }
4078+ return n;
4079+}
4080+
4081+static inline unsigned long __must_check copy_to_user(void __user *to,
4082+ const void *from, unsigned long n)
4083+{
4084+ unsigned long over;
4085+
4086+ if ((long)n < 0)
4087+ return n;
4088+
4089+ if (access_ok(VERIFY_WRITE, to, n)) {
4090+ if (!__builtin_constant_p(n))
4091+ check_object_size(from, n, true);
4092+ return __copy_tofrom_user(to, (__force void __user *)from, n);
4093+ }
4094+ if ((unsigned long)to < TASK_SIZE) {
4095+ over = (unsigned long)to + n - TASK_SIZE;
4096+ if (!__builtin_constant_p(n))
4097+ check_object_size(from, n - over, true);
4098+ return __copy_tofrom_user(to, (__force void __user *)from,
4099+ n - over) + over;
4100+ }
4101+ return n;
4102+}
4103+
4104+#else /* __powerpc64__ */
4105+
4106+#define __copy_in_user(to, from, size) \
4107+ __copy_tofrom_user((to), (from), (size))
4108+
4109+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
4110+{
4111+ if ((long)n < 0 || n > INT_MAX)
4112+ return n;
4113+
4114+ if (!__builtin_constant_p(n))
4115+ check_object_size(to, n, false);
4116+
4117+ if (likely(access_ok(VERIFY_READ, from, n)))
4118+ n = __copy_from_user(to, from, n);
4119+ else
4120+ memset(to, 0, n);
4121+ return n;
4122+}
4123+
4124+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
4125+{
4126+ if ((long)n < 0 || n > INT_MAX)
4127+ return n;
4128+
4129+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
4130+ if (!__builtin_constant_p(n))
4131+ check_object_size(from, n, true);
4132+ n = __copy_to_user(to, from, n);
4133+ }
4134+ return n;
4135+}
4136+
4137+extern unsigned long copy_in_user(void __user *to, const void __user *from,
4138+ unsigned long n);
4139+
4140+#endif /* __powerpc64__ */
4141+
4142 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4143
4144 static inline unsigned long clear_user(void __user *addr, unsigned long size)
4145diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4146index 7215cc2..a9730c1 100644
4147--- a/arch/powerpc/kernel/exceptions-64e.S
4148+++ b/arch/powerpc/kernel/exceptions-64e.S
4149@@ -661,6 +661,7 @@ storage_fault_common:
4150 std r14,_DAR(r1)
4151 std r15,_DSISR(r1)
4152 addi r3,r1,STACK_FRAME_OVERHEAD
4153+ bl .save_nvgprs
4154 mr r4,r14
4155 mr r5,r15
4156 ld r14,PACA_EXGEN+EX_R14(r13)
4157@@ -669,8 +670,7 @@ storage_fault_common:
4158 cmpdi r3,0
4159 bne- 1f
4160 b .ret_from_except_lite
4161-1: bl .save_nvgprs
4162- mr r5,r3
4163+1: mr r5,r3
4164 addi r3,r1,STACK_FRAME_OVERHEAD
4165 ld r4,_DAR(r1)
4166 bl .bad_page_fault
4167diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
4168index 1c06d29..c2a339b 100644
4169--- a/arch/powerpc/kernel/exceptions-64s.S
4170+++ b/arch/powerpc/kernel/exceptions-64s.S
4171@@ -888,10 +888,10 @@ handle_page_fault:
4172 11: ld r4,_DAR(r1)
4173 ld r5,_DSISR(r1)
4174 addi r3,r1,STACK_FRAME_OVERHEAD
4175+ bl .save_nvgprs
4176 bl .do_page_fault
4177 cmpdi r3,0
4178 beq+ 12f
4179- bl .save_nvgprs
4180 mr r5,r3
4181 addi r3,r1,STACK_FRAME_OVERHEAD
4182 lwz r4,_DAR(r1)
4183diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4184index 2e3200c..72095ce 100644
4185--- a/arch/powerpc/kernel/module_32.c
4186+++ b/arch/powerpc/kernel/module_32.c
4187@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4188 me->arch.core_plt_section = i;
4189 }
4190 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4191- printk("Module doesn't contain .plt or .init.plt sections.\n");
4192+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4193 return -ENOEXEC;
4194 }
4195
4196@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4197
4198 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4199 /* Init, or core PLT? */
4200- if (location >= mod->module_core
4201- && location < mod->module_core + mod->core_size)
4202+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4203+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4204 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4205- else
4206+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4207+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4208 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4209+ else {
4210+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4211+ return ~0UL;
4212+ }
4213
4214 /* Find this entry, or if that fails, the next avail. entry */
4215 while (entry->jump[0]) {
4216diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4217index 1a1f2dd..f4d1bb4 100644
4218--- a/arch/powerpc/kernel/process.c
4219+++ b/arch/powerpc/kernel/process.c
4220@@ -681,8 +681,8 @@ void show_regs(struct pt_regs * regs)
4221 * Lookup NIP late so we have the best change of getting the
4222 * above info out without failing
4223 */
4224- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4225- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4226+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4227+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4228 #endif
4229 show_stack(current, (unsigned long *) regs->gpr[1]);
4230 if (!user_mode(regs))
4231@@ -1181,10 +1181,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4232 newsp = stack[0];
4233 ip = stack[STACK_FRAME_LR_SAVE];
4234 if (!firstframe || ip != lr) {
4235- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4236+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4237 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4238 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4239- printk(" (%pS)",
4240+ printk(" (%pA)",
4241 (void *)current->ret_stack[curr_frame].ret);
4242 curr_frame--;
4243 }
4244@@ -1204,7 +1204,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4245 struct pt_regs *regs = (struct pt_regs *)
4246 (sp + STACK_FRAME_OVERHEAD);
4247 lr = regs->link;
4248- printk("--- Exception: %lx at %pS\n LR = %pS\n",
4249+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
4250 regs->trap, (void *)regs->nip, (void *)lr);
4251 firstframe = 1;
4252 }
4253@@ -1246,58 +1246,3 @@ void __ppc64_runlatch_off(void)
4254 mtspr(SPRN_CTRLT, ctrl);
4255 }
4256 #endif /* CONFIG_PPC64 */
4257-
4258-unsigned long arch_align_stack(unsigned long sp)
4259-{
4260- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4261- sp -= get_random_int() & ~PAGE_MASK;
4262- return sp & ~0xf;
4263-}
4264-
4265-static inline unsigned long brk_rnd(void)
4266-{
4267- unsigned long rnd = 0;
4268-
4269- /* 8MB for 32bit, 1GB for 64bit */
4270- if (is_32bit_task())
4271- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4272- else
4273- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4274-
4275- return rnd << PAGE_SHIFT;
4276-}
4277-
4278-unsigned long arch_randomize_brk(struct mm_struct *mm)
4279-{
4280- unsigned long base = mm->brk;
4281- unsigned long ret;
4282-
4283-#ifdef CONFIG_PPC_STD_MMU_64
4284- /*
4285- * If we are using 1TB segments and we are allowed to randomise
4286- * the heap, we can put it above 1TB so it is backed by a 1TB
4287- * segment. Otherwise the heap will be in the bottom 1TB
4288- * which always uses 256MB segments and this may result in a
4289- * performance penalty.
4290- */
4291- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4292- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4293-#endif
4294-
4295- ret = PAGE_ALIGN(base + brk_rnd());
4296-
4297- if (ret < mm->brk)
4298- return mm->brk;
4299-
4300- return ret;
4301-}
4302-
4303-unsigned long randomize_et_dyn(unsigned long base)
4304-{
4305- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4306-
4307- if (ret < base)
4308- return base;
4309-
4310- return ret;
4311-}
4312diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4313index c10fc28..c4ef063 100644
4314--- a/arch/powerpc/kernel/ptrace.c
4315+++ b/arch/powerpc/kernel/ptrace.c
4316@@ -1660,6 +1660,10 @@ long arch_ptrace(struct task_struct *child, long request,
4317 return ret;
4318 }
4319
4320+#ifdef CONFIG_GRKERNSEC_SETXID
4321+extern void gr_delayed_cred_worker(void);
4322+#endif
4323+
4324 /*
4325 * We must return the syscall number to actually look up in the table.
4326 * This can be -1L to skip running any syscall at all.
4327@@ -1670,6 +1674,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4328
4329 secure_computing_strict(regs->gpr[0]);
4330
4331+#ifdef CONFIG_GRKERNSEC_SETXID
4332+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4333+ gr_delayed_cred_worker();
4334+#endif
4335+
4336 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4337 tracehook_report_syscall_entry(regs))
4338 /*
4339@@ -1704,6 +1713,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4340 {
4341 int step;
4342
4343+#ifdef CONFIG_GRKERNSEC_SETXID
4344+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4345+ gr_delayed_cred_worker();
4346+#endif
4347+
4348 audit_syscall_exit(regs);
4349
4350 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
4351diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4352index 8b4c049..dcd6ef3 100644
4353--- a/arch/powerpc/kernel/signal_32.c
4354+++ b/arch/powerpc/kernel/signal_32.c
4355@@ -852,7 +852,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4356 /* Save user registers on the stack */
4357 frame = &rt_sf->uc.uc_mcontext;
4358 addr = frame;
4359- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4360+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4361 if (save_user_regs(regs, frame, 0, 1))
4362 goto badframe;
4363 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4364diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4365index d183f87..1867f1a 100644
4366--- a/arch/powerpc/kernel/signal_64.c
4367+++ b/arch/powerpc/kernel/signal_64.c
4368@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4369 current->thread.fpscr.val = 0;
4370
4371 /* Set up to return from userspace. */
4372- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4373+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4374 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4375 } else {
4376 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4377diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
4378index f2496f2..4e3cc47 100644
4379--- a/arch/powerpc/kernel/syscalls.c
4380+++ b/arch/powerpc/kernel/syscalls.c
4381@@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)
4382 long ret;
4383
4384 if (personality(current->personality) == PER_LINUX32
4385- && personality == PER_LINUX)
4386- personality = PER_LINUX32;
4387+ && personality(personality) == PER_LINUX)
4388+ personality = (personality & ~PER_MASK) | PER_LINUX32;
4389 ret = sys_personality(personality);
4390- if (ret == PER_LINUX32)
4391- ret = PER_LINUX;
4392+ if (personality(ret) == PER_LINUX32)
4393+ ret = (ret & ~PER_MASK) | PER_LINUX;
4394 return ret;
4395 }
4396 #endif
4397diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4398index ae0843f..f16372c 100644
4399--- a/arch/powerpc/kernel/traps.c
4400+++ b/arch/powerpc/kernel/traps.c
4401@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4402 return flags;
4403 }
4404
4405+extern void gr_handle_kernel_exploit(void);
4406+
4407 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4408 int signr)
4409 {
4410@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4411 panic("Fatal exception in interrupt");
4412 if (panic_on_oops)
4413 panic("Fatal exception");
4414+
4415+ gr_handle_kernel_exploit();
4416+
4417 do_exit(signr);
4418 }
4419
4420diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4421index 9eb5b9b..e45498a 100644
4422--- a/arch/powerpc/kernel/vdso.c
4423+++ b/arch/powerpc/kernel/vdso.c
4424@@ -34,6 +34,7 @@
4425 #include <asm/firmware.h>
4426 #include <asm/vdso.h>
4427 #include <asm/vdso_datapage.h>
4428+#include <asm/mman.h>
4429
4430 #include "setup.h"
4431
4432@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4433 vdso_base = VDSO32_MBASE;
4434 #endif
4435
4436- current->mm->context.vdso_base = 0;
4437+ current->mm->context.vdso_base = ~0UL;
4438
4439 /* vDSO has a problem and was disabled, just don't "enable" it for the
4440 * process
4441@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4442 vdso_base = get_unmapped_area(NULL, vdso_base,
4443 (vdso_pages << PAGE_SHIFT) +
4444 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4445- 0, 0);
4446+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
4447 if (IS_ERR_VALUE(vdso_base)) {
4448 rc = vdso_base;
4449 goto fail_mmapsem;
4450diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4451index 5eea6f3..5d10396 100644
4452--- a/arch/powerpc/lib/usercopy_64.c
4453+++ b/arch/powerpc/lib/usercopy_64.c
4454@@ -9,22 +9,6 @@
4455 #include <linux/module.h>
4456 #include <asm/uaccess.h>
4457
4458-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4459-{
4460- if (likely(access_ok(VERIFY_READ, from, n)))
4461- n = __copy_from_user(to, from, n);
4462- else
4463- memset(to, 0, n);
4464- return n;
4465-}
4466-
4467-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4468-{
4469- if (likely(access_ok(VERIFY_WRITE, to, n)))
4470- n = __copy_to_user(to, from, n);
4471- return n;
4472-}
4473-
4474 unsigned long copy_in_user(void __user *to, const void __user *from,
4475 unsigned long n)
4476 {
4477@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4478 return n;
4479 }
4480
4481-EXPORT_SYMBOL(copy_from_user);
4482-EXPORT_SYMBOL(copy_to_user);
4483 EXPORT_SYMBOL(copy_in_user);
4484
4485diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4486index 08ffcf5..a0ab912 100644
4487--- a/arch/powerpc/mm/fault.c
4488+++ b/arch/powerpc/mm/fault.c
4489@@ -32,6 +32,10 @@
4490 #include <linux/perf_event.h>
4491 #include <linux/magic.h>
4492 #include <linux/ratelimit.h>
4493+#include <linux/slab.h>
4494+#include <linux/pagemap.h>
4495+#include <linux/compiler.h>
4496+#include <linux/unistd.h>
4497
4498 #include <asm/firmware.h>
4499 #include <asm/page.h>
4500@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4501 }
4502 #endif
4503
4504+#ifdef CONFIG_PAX_PAGEEXEC
4505+/*
4506+ * PaX: decide what to do with offenders (regs->nip = fault address)
4507+ *
4508+ * returns 1 when task should be killed
4509+ */
4510+static int pax_handle_fetch_fault(struct pt_regs *regs)
4511+{
4512+ return 1;
4513+}
4514+
4515+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4516+{
4517+ unsigned long i;
4518+
4519+ printk(KERN_ERR "PAX: bytes at PC: ");
4520+ for (i = 0; i < 5; i++) {
4521+ unsigned int c;
4522+ if (get_user(c, (unsigned int __user *)pc+i))
4523+ printk(KERN_CONT "???????? ");
4524+ else
4525+ printk(KERN_CONT "%08x ", c);
4526+ }
4527+ printk("\n");
4528+}
4529+#endif
4530+
4531 /*
4532 * Check whether the instruction at regs->nip is a store using
4533 * an update addressing form which will update r1.
4534@@ -215,7 +246,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4535 * indicate errors in DSISR but can validly be set in SRR1.
4536 */
4537 if (trap == 0x400)
4538- error_code &= 0x48200000;
4539+ error_code &= 0x58200000;
4540 else
4541 is_write = error_code & DSISR_ISSTORE;
4542 #else
4543@@ -366,7 +397,7 @@ good_area:
4544 * "undefined". Of those that can be set, this is the only
4545 * one which seems bad.
4546 */
4547- if (error_code & 0x10000000)
4548+ if (error_code & DSISR_GUARDED)
4549 /* Guarded storage error. */
4550 goto bad_area;
4551 #endif /* CONFIG_8xx */
4552@@ -381,7 +412,7 @@ good_area:
4553 * processors use the same I/D cache coherency mechanism
4554 * as embedded.
4555 */
4556- if (error_code & DSISR_PROTFAULT)
4557+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4558 goto bad_area;
4559 #endif /* CONFIG_PPC_STD_MMU */
4560
4561@@ -463,6 +494,23 @@ bad_area:
4562 bad_area_nosemaphore:
4563 /* User mode accesses cause a SIGSEGV */
4564 if (user_mode(regs)) {
4565+
4566+#ifdef CONFIG_PAX_PAGEEXEC
4567+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4568+#ifdef CONFIG_PPC_STD_MMU
4569+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4570+#else
4571+ if (is_exec && regs->nip == address) {
4572+#endif
4573+ switch (pax_handle_fetch_fault(regs)) {
4574+ }
4575+
4576+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4577+ do_group_exit(SIGKILL);
4578+ }
4579+ }
4580+#endif
4581+
4582 _exception(SIGSEGV, regs, code, address);
4583 return 0;
4584 }
4585diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4586index 67a42ed..1c7210c 100644
4587--- a/arch/powerpc/mm/mmap_64.c
4588+++ b/arch/powerpc/mm/mmap_64.c
4589@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4590 */
4591 if (mmap_is_legacy()) {
4592 mm->mmap_base = TASK_UNMAPPED_BASE;
4593+
4594+#ifdef CONFIG_PAX_RANDMMAP
4595+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4596+ mm->mmap_base += mm->delta_mmap;
4597+#endif
4598+
4599 mm->get_unmapped_area = arch_get_unmapped_area;
4600 mm->unmap_area = arch_unmap_area;
4601 } else {
4602 mm->mmap_base = mmap_base();
4603+
4604+#ifdef CONFIG_PAX_RANDMMAP
4605+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4606+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4607+#endif
4608+
4609 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4610 mm->unmap_area = arch_unmap_area_topdown;
4611 }
4612diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4613index 73709f7..6b90313 100644
4614--- a/arch/powerpc/mm/slice.c
4615+++ b/arch/powerpc/mm/slice.c
4616@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4617 if ((mm->task_size - len) < addr)
4618 return 0;
4619 vma = find_vma(mm, addr);
4620- return (!vma || (addr + len) <= vma->vm_start);
4621+ return check_heap_stack_gap(vma, addr, len);
4622 }
4623
4624 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4625@@ -256,7 +256,7 @@ full_search:
4626 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4627 continue;
4628 }
4629- if (!vma || addr + len <= vma->vm_start) {
4630+ if (check_heap_stack_gap(vma, addr, len)) {
4631 /*
4632 * Remember the place where we stopped the search:
4633 */
4634@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4635 }
4636 }
4637
4638- addr = mm->mmap_base;
4639- while (addr > len) {
4640+ if (mm->mmap_base < len)
4641+ addr = -ENOMEM;
4642+ else
4643+ addr = mm->mmap_base - len;
4644+
4645+ while (!IS_ERR_VALUE(addr)) {
4646 /* Go down by chunk size */
4647- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4648+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
4649
4650 /* Check for hit with different page size */
4651 mask = slice_range_to_mask(addr, len);
4652@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4653 * return with success:
4654 */
4655 vma = find_vma(mm, addr);
4656- if (!vma || (addr + len) <= vma->vm_start) {
4657+ if (check_heap_stack_gap(vma, addr, len)) {
4658 /* remember the address as a hint for next time */
4659 if (use_cache)
4660 mm->free_area_cache = addr;
4661@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4662 mm->cached_hole_size = vma->vm_start - addr;
4663
4664 /* try just below the current vma->vm_start */
4665- addr = vma->vm_start;
4666+ addr = skip_heap_stack_gap(vma, len);
4667 }
4668
4669 /*
4670@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4671 if (fixed && addr > (mm->task_size - len))
4672 return -EINVAL;
4673
4674+#ifdef CONFIG_PAX_RANDMMAP
4675+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4676+ addr = 0;
4677+#endif
4678+
4679 /* If hint, make sure it matches our alignment restrictions */
4680 if (!fixed && addr) {
4681 addr = _ALIGN_UP(addr, 1ul << pshift);
4682diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4683index 748347b..81bc6c7 100644
4684--- a/arch/s390/include/asm/atomic.h
4685+++ b/arch/s390/include/asm/atomic.h
4686@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4687 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4688 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4689
4690+#define atomic64_read_unchecked(v) atomic64_read(v)
4691+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4692+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4693+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4694+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4695+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4696+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4697+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4698+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4699+
4700 #define smp_mb__before_atomic_dec() smp_mb()
4701 #define smp_mb__after_atomic_dec() smp_mb()
4702 #define smp_mb__before_atomic_inc() smp_mb()
4703diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4704index 2a30d5a..5e5586f 100644
4705--- a/arch/s390/include/asm/cache.h
4706+++ b/arch/s390/include/asm/cache.h
4707@@ -11,8 +11,10 @@
4708 #ifndef __ARCH_S390_CACHE_H
4709 #define __ARCH_S390_CACHE_H
4710
4711-#define L1_CACHE_BYTES 256
4712+#include <linux/const.h>
4713+
4714 #define L1_CACHE_SHIFT 8
4715+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4716 #define NET_SKB_PAD 32
4717
4718 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4719diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4720index 06151e6..598f9a5 100644
4721--- a/arch/s390/include/asm/elf.h
4722+++ b/arch/s390/include/asm/elf.h
4723@@ -161,8 +161,14 @@ extern unsigned int vdso_enabled;
4724 the loader. We need to make sure that it is out of the way of the program
4725 that it will "exec", and that there is sufficient room for the brk. */
4726
4727-extern unsigned long randomize_et_dyn(unsigned long base);
4728-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4729+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4730+
4731+#ifdef CONFIG_PAX_ASLR
4732+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4733+
4734+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4735+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4736+#endif
4737
4738 /* This yields a mask that user programs can use to figure out what
4739 instruction set this CPU supports. */
4740@@ -182,7 +188,8 @@ extern char elf_platform[];
4741 #define ELF_PLATFORM (elf_platform)
4742
4743 #ifndef CONFIG_64BIT
4744-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
4745+#define SET_PERSONALITY(ex) \
4746+ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
4747 #else /* CONFIG_64BIT */
4748 #define SET_PERSONALITY(ex) \
4749 do { \
4750@@ -210,7 +217,4 @@ struct linux_binprm;
4751 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4752 int arch_setup_additional_pages(struct linux_binprm *, int);
4753
4754-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4755-#define arch_randomize_brk arch_randomize_brk
4756-
4757 #endif
4758diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
4759index c4a93d6..4d2a9b4 100644
4760--- a/arch/s390/include/asm/exec.h
4761+++ b/arch/s390/include/asm/exec.h
4762@@ -7,6 +7,6 @@
4763 #ifndef __ASM_EXEC_H
4764 #define __ASM_EXEC_H
4765
4766-extern unsigned long arch_align_stack(unsigned long sp);
4767+#define arch_align_stack(x) ((x) & ~0xfUL)
4768
4769 #endif /* __ASM_EXEC_H */
4770diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4771index 1f3a79b..44d7f9c 100644
4772--- a/arch/s390/include/asm/uaccess.h
4773+++ b/arch/s390/include/asm/uaccess.h
4774@@ -241,6 +241,10 @@ static inline unsigned long __must_check
4775 copy_to_user(void __user *to, const void *from, unsigned long n)
4776 {
4777 might_fault();
4778+
4779+ if ((long)n < 0)
4780+ return n;
4781+
4782 if (access_ok(VERIFY_WRITE, to, n))
4783 n = __copy_to_user(to, from, n);
4784 return n;
4785@@ -266,6 +270,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4786 static inline unsigned long __must_check
4787 __copy_from_user(void *to, const void __user *from, unsigned long n)
4788 {
4789+ if ((long)n < 0)
4790+ return n;
4791+
4792 if (__builtin_constant_p(n) && (n <= 256))
4793 return uaccess.copy_from_user_small(n, from, to);
4794 else
4795@@ -297,10 +304,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
4796 static inline unsigned long __must_check
4797 copy_from_user(void *to, const void __user *from, unsigned long n)
4798 {
4799- unsigned int sz = __compiletime_object_size(to);
4800+ size_t sz = __compiletime_object_size(to);
4801
4802 might_fault();
4803- if (unlikely(sz != -1 && sz < n)) {
4804+
4805+ if ((long)n < 0)
4806+ return n;
4807+
4808+ if (unlikely(sz != (size_t)-1 && sz < n)) {
4809 copy_from_user_overflow();
4810 return n;
4811 }
4812diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4813index dfcb343..eda788a 100644
4814--- a/arch/s390/kernel/module.c
4815+++ b/arch/s390/kernel/module.c
4816@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4817
4818 /* Increase core size by size of got & plt and set start
4819 offsets for got and plt. */
4820- me->core_size = ALIGN(me->core_size, 4);
4821- me->arch.got_offset = me->core_size;
4822- me->core_size += me->arch.got_size;
4823- me->arch.plt_offset = me->core_size;
4824- me->core_size += me->arch.plt_size;
4825+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4826+ me->arch.got_offset = me->core_size_rw;
4827+ me->core_size_rw += me->arch.got_size;
4828+ me->arch.plt_offset = me->core_size_rx;
4829+ me->core_size_rx += me->arch.plt_size;
4830 return 0;
4831 }
4832
4833@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4834 if (info->got_initialized == 0) {
4835 Elf_Addr *gotent;
4836
4837- gotent = me->module_core + me->arch.got_offset +
4838+ gotent = me->module_core_rw + me->arch.got_offset +
4839 info->got_offset;
4840 *gotent = val;
4841 info->got_initialized = 1;
4842@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4843 else if (r_type == R_390_GOTENT ||
4844 r_type == R_390_GOTPLTENT)
4845 *(unsigned int *) loc =
4846- (val + (Elf_Addr) me->module_core - loc) >> 1;
4847+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4848 else if (r_type == R_390_GOT64 ||
4849 r_type == R_390_GOTPLT64)
4850 *(unsigned long *) loc = val;
4851@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4852 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4853 if (info->plt_initialized == 0) {
4854 unsigned int *ip;
4855- ip = me->module_core + me->arch.plt_offset +
4856+ ip = me->module_core_rx + me->arch.plt_offset +
4857 info->plt_offset;
4858 #ifndef CONFIG_64BIT
4859 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4860@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4861 val - loc + 0xffffUL < 0x1ffffeUL) ||
4862 (r_type == R_390_PLT32DBL &&
4863 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4864- val = (Elf_Addr) me->module_core +
4865+ val = (Elf_Addr) me->module_core_rx +
4866 me->arch.plt_offset +
4867 info->plt_offset;
4868 val += rela->r_addend - loc;
4869@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4870 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4871 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4872 val = val + rela->r_addend -
4873- ((Elf_Addr) me->module_core + me->arch.got_offset);
4874+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4875 if (r_type == R_390_GOTOFF16)
4876 *(unsigned short *) loc = val;
4877 else if (r_type == R_390_GOTOFF32)
4878@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4879 break;
4880 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4881 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4882- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4883+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4884 rela->r_addend - loc;
4885 if (r_type == R_390_GOTPC)
4886 *(unsigned int *) loc = val;
4887diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4888index 60055ce..ee4b252 100644
4889--- a/arch/s390/kernel/process.c
4890+++ b/arch/s390/kernel/process.c
4891@@ -316,39 +316,3 @@ unsigned long get_wchan(struct task_struct *p)
4892 }
4893 return 0;
4894 }
4895-
4896-unsigned long arch_align_stack(unsigned long sp)
4897-{
4898- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4899- sp -= get_random_int() & ~PAGE_MASK;
4900- return sp & ~0xf;
4901-}
4902-
4903-static inline unsigned long brk_rnd(void)
4904-{
4905- /* 8MB for 32bit, 1GB for 64bit */
4906- if (is_32bit_task())
4907- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4908- else
4909- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4910-}
4911-
4912-unsigned long arch_randomize_brk(struct mm_struct *mm)
4913-{
4914- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4915-
4916- if (ret < mm->brk)
4917- return mm->brk;
4918- return ret;
4919-}
4920-
4921-unsigned long randomize_et_dyn(unsigned long base)
4922-{
4923- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4924-
4925- if (!(current->flags & PF_RANDOMIZE))
4926- return base;
4927- if (ret < base)
4928- return base;
4929- return ret;
4930-}
4931diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4932index a64fe53..5c66963 100644
4933--- a/arch/s390/mm/mmap.c
4934+++ b/arch/s390/mm/mmap.c
4935@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4936 */
4937 if (mmap_is_legacy()) {
4938 mm->mmap_base = TASK_UNMAPPED_BASE;
4939+
4940+#ifdef CONFIG_PAX_RANDMMAP
4941+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4942+ mm->mmap_base += mm->delta_mmap;
4943+#endif
4944+
4945 mm->get_unmapped_area = arch_get_unmapped_area;
4946 mm->unmap_area = arch_unmap_area;
4947 } else {
4948 mm->mmap_base = mmap_base();
4949+
4950+#ifdef CONFIG_PAX_RANDMMAP
4951+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4952+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4953+#endif
4954+
4955 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4956 mm->unmap_area = arch_unmap_area_topdown;
4957 }
4958@@ -174,10 +186,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4959 */
4960 if (mmap_is_legacy()) {
4961 mm->mmap_base = TASK_UNMAPPED_BASE;
4962+
4963+#ifdef CONFIG_PAX_RANDMMAP
4964+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4965+ mm->mmap_base += mm->delta_mmap;
4966+#endif
4967+
4968 mm->get_unmapped_area = s390_get_unmapped_area;
4969 mm->unmap_area = arch_unmap_area;
4970 } else {
4971 mm->mmap_base = mmap_base();
4972+
4973+#ifdef CONFIG_PAX_RANDMMAP
4974+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4975+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4976+#endif
4977+
4978 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4979 mm->unmap_area = arch_unmap_area_topdown;
4980 }
4981diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4982index ae3d59f..f65f075 100644
4983--- a/arch/score/include/asm/cache.h
4984+++ b/arch/score/include/asm/cache.h
4985@@ -1,7 +1,9 @@
4986 #ifndef _ASM_SCORE_CACHE_H
4987 #define _ASM_SCORE_CACHE_H
4988
4989+#include <linux/const.h>
4990+
4991 #define L1_CACHE_SHIFT 4
4992-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4993+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4994
4995 #endif /* _ASM_SCORE_CACHE_H */
4996diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
4997index f9f3cd5..58ff438 100644
4998--- a/arch/score/include/asm/exec.h
4999+++ b/arch/score/include/asm/exec.h
5000@@ -1,6 +1,6 @@
5001 #ifndef _ASM_SCORE_EXEC_H
5002 #define _ASM_SCORE_EXEC_H
5003
5004-extern unsigned long arch_align_stack(unsigned long sp);
5005+#define arch_align_stack(x) (x)
5006
5007 #endif /* _ASM_SCORE_EXEC_H */
5008diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
5009index 2707023..1c2a3b7 100644
5010--- a/arch/score/kernel/process.c
5011+++ b/arch/score/kernel/process.c
5012@@ -159,8 +159,3 @@ unsigned long get_wchan(struct task_struct *task)
5013
5014 return task_pt_regs(task)->cp0_epc;
5015 }
5016-
5017-unsigned long arch_align_stack(unsigned long sp)
5018-{
5019- return sp;
5020-}
5021diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
5022index ef9e555..331bd29 100644
5023--- a/arch/sh/include/asm/cache.h
5024+++ b/arch/sh/include/asm/cache.h
5025@@ -9,10 +9,11 @@
5026 #define __ASM_SH_CACHE_H
5027 #ifdef __KERNEL__
5028
5029+#include <linux/const.h>
5030 #include <linux/init.h>
5031 #include <cpu/cache.h>
5032
5033-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5034+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5035
5036 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5037
5038diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5039index afeb710..d1d1289 100644
5040--- a/arch/sh/mm/mmap.c
5041+++ b/arch/sh/mm/mmap.c
5042@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5043 addr = PAGE_ALIGN(addr);
5044
5045 vma = find_vma(mm, addr);
5046- if (TASK_SIZE - len >= addr &&
5047- (!vma || addr + len <= vma->vm_start))
5048+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5049 return addr;
5050 }
5051
5052@@ -106,7 +105,7 @@ full_search:
5053 }
5054 return -ENOMEM;
5055 }
5056- if (likely(!vma || addr + len <= vma->vm_start)) {
5057+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5058 /*
5059 * Remember the place where we stopped the search:
5060 */
5061@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5062 addr = PAGE_ALIGN(addr);
5063
5064 vma = find_vma(mm, addr);
5065- if (TASK_SIZE - len >= addr &&
5066- (!vma || addr + len <= vma->vm_start))
5067+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5068 return addr;
5069 }
5070
5071@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5072 /* make sure it can fit in the remaining address space */
5073 if (likely(addr > len)) {
5074 vma = find_vma(mm, addr-len);
5075- if (!vma || addr <= vma->vm_start) {
5076+ if (check_heap_stack_gap(vma, addr - len, len)) {
5077 /* remember the address as a hint for next time */
5078 return (mm->free_area_cache = addr-len);
5079 }
5080@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5081 if (unlikely(mm->mmap_base < len))
5082 goto bottomup;
5083
5084- addr = mm->mmap_base-len;
5085- if (do_colour_align)
5086- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5087+ addr = mm->mmap_base - len;
5088
5089 do {
5090+ if (do_colour_align)
5091+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5092 /*
5093 * Lookup failure means no vma is above this address,
5094 * else if new region fits below vma->vm_start,
5095 * return with success:
5096 */
5097 vma = find_vma(mm, addr);
5098- if (likely(!vma || addr+len <= vma->vm_start)) {
5099+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5100 /* remember the address as a hint for next time */
5101 return (mm->free_area_cache = addr);
5102 }
5103@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5104 mm->cached_hole_size = vma->vm_start - addr;
5105
5106 /* try just below the current vma->vm_start */
5107- addr = vma->vm_start-len;
5108- if (do_colour_align)
5109- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5110- } while (likely(len < vma->vm_start));
5111+ addr = skip_heap_stack_gap(vma, len);
5112+ } while (!IS_ERR_VALUE(addr));
5113
5114 bottomup:
5115 /*
5116diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5117index ce35a1c..2e7b8f9 100644
5118--- a/arch/sparc/include/asm/atomic_64.h
5119+++ b/arch/sparc/include/asm/atomic_64.h
5120@@ -14,18 +14,40 @@
5121 #define ATOMIC64_INIT(i) { (i) }
5122
5123 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5124+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5125+{
5126+ return v->counter;
5127+}
5128 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5129+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5130+{
5131+ return v->counter;
5132+}
5133
5134 #define atomic_set(v, i) (((v)->counter) = i)
5135+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5136+{
5137+ v->counter = i;
5138+}
5139 #define atomic64_set(v, i) (((v)->counter) = i)
5140+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5141+{
5142+ v->counter = i;
5143+}
5144
5145 extern void atomic_add(int, atomic_t *);
5146+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5147 extern void atomic64_add(long, atomic64_t *);
5148+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5149 extern void atomic_sub(int, atomic_t *);
5150+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5151 extern void atomic64_sub(long, atomic64_t *);
5152+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5153
5154 extern int atomic_add_ret(int, atomic_t *);
5155+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5156 extern long atomic64_add_ret(long, atomic64_t *);
5157+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5158 extern int atomic_sub_ret(int, atomic_t *);
5159 extern long atomic64_sub_ret(long, atomic64_t *);
5160
5161@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5162 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5163
5164 #define atomic_inc_return(v) atomic_add_ret(1, v)
5165+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5166+{
5167+ return atomic_add_ret_unchecked(1, v);
5168+}
5169 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5170+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5171+{
5172+ return atomic64_add_ret_unchecked(1, v);
5173+}
5174
5175 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5176 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5177
5178 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5179+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5180+{
5181+ return atomic_add_ret_unchecked(i, v);
5182+}
5183 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5184+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5185+{
5186+ return atomic64_add_ret_unchecked(i, v);
5187+}
5188
5189 /*
5190 * atomic_inc_and_test - increment and test
5191@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5192 * other cases.
5193 */
5194 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5195+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5196+{
5197+ return atomic_inc_return_unchecked(v) == 0;
5198+}
5199 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5200
5201 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5202@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5203 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5204
5205 #define atomic_inc(v) atomic_add(1, v)
5206+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5207+{
5208+ atomic_add_unchecked(1, v);
5209+}
5210 #define atomic64_inc(v) atomic64_add(1, v)
5211+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5212+{
5213+ atomic64_add_unchecked(1, v);
5214+}
5215
5216 #define atomic_dec(v) atomic_sub(1, v)
5217+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5218+{
5219+ atomic_sub_unchecked(1, v);
5220+}
5221 #define atomic64_dec(v) atomic64_sub(1, v)
5222+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5223+{
5224+ atomic64_sub_unchecked(1, v);
5225+}
5226
5227 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5228 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5229
5230 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5231+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5232+{
5233+ return cmpxchg(&v->counter, old, new);
5234+}
5235 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5236+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5237+{
5238+ return xchg(&v->counter, new);
5239+}
5240
5241 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5242 {
5243- int c, old;
5244+ int c, old, new;
5245 c = atomic_read(v);
5246 for (;;) {
5247- if (unlikely(c == (u)))
5248+ if (unlikely(c == u))
5249 break;
5250- old = atomic_cmpxchg((v), c, c + (a));
5251+
5252+ asm volatile("addcc %2, %0, %0\n"
5253+
5254+#ifdef CONFIG_PAX_REFCOUNT
5255+ "tvs %%icc, 6\n"
5256+#endif
5257+
5258+ : "=r" (new)
5259+ : "0" (c), "ir" (a)
5260+ : "cc");
5261+
5262+ old = atomic_cmpxchg(v, c, new);
5263 if (likely(old == c))
5264 break;
5265 c = old;
5266@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5267 #define atomic64_cmpxchg(v, o, n) \
5268 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5269 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5270+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5271+{
5272+ return xchg(&v->counter, new);
5273+}
5274
5275 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5276 {
5277- long c, old;
5278+ long c, old, new;
5279 c = atomic64_read(v);
5280 for (;;) {
5281- if (unlikely(c == (u)))
5282+ if (unlikely(c == u))
5283 break;
5284- old = atomic64_cmpxchg((v), c, c + (a));
5285+
5286+ asm volatile("addcc %2, %0, %0\n"
5287+
5288+#ifdef CONFIG_PAX_REFCOUNT
5289+ "tvs %%xcc, 6\n"
5290+#endif
5291+
5292+ : "=r" (new)
5293+ : "0" (c), "ir" (a)
5294+ : "cc");
5295+
5296+ old = atomic64_cmpxchg(v, c, new);
5297 if (likely(old == c))
5298 break;
5299 c = old;
5300 }
5301- return c != (u);
5302+ return c != u;
5303 }
5304
5305 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5306diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5307index 5bb6991..5c2132e 100644
5308--- a/arch/sparc/include/asm/cache.h
5309+++ b/arch/sparc/include/asm/cache.h
5310@@ -7,10 +7,12 @@
5311 #ifndef _SPARC_CACHE_H
5312 #define _SPARC_CACHE_H
5313
5314+#include <linux/const.h>
5315+
5316 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5317
5318 #define L1_CACHE_SHIFT 5
5319-#define L1_CACHE_BYTES 32
5320+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5321
5322 #ifdef CONFIG_SPARC32
5323 #define SMP_CACHE_BYTES_SHIFT 5
5324diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5325index 2d4d755..81b6662 100644
5326--- a/arch/sparc/include/asm/elf_32.h
5327+++ b/arch/sparc/include/asm/elf_32.h
5328@@ -114,6 +114,13 @@ typedef struct {
5329
5330 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5331
5332+#ifdef CONFIG_PAX_ASLR
5333+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5334+
5335+#define PAX_DELTA_MMAP_LEN 16
5336+#define PAX_DELTA_STACK_LEN 16
5337+#endif
5338+
5339 /* This yields a mask that user programs can use to figure out what
5340 instruction set this cpu supports. This can NOT be done in userspace
5341 on Sparc. */
5342diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5343index 7df8b7f..4946269 100644
5344--- a/arch/sparc/include/asm/elf_64.h
5345+++ b/arch/sparc/include/asm/elf_64.h
5346@@ -180,6 +180,13 @@ typedef struct {
5347 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5348 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5349
5350+#ifdef CONFIG_PAX_ASLR
5351+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5352+
5353+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5354+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5355+#endif
5356+
5357 extern unsigned long sparc64_elf_hwcap;
5358 #define ELF_HWCAP sparc64_elf_hwcap
5359
5360diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5361index e5b169b46..e90b4fa 100644
5362--- a/arch/sparc/include/asm/pgalloc_32.h
5363+++ b/arch/sparc/include/asm/pgalloc_32.h
5364@@ -46,6 +46,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
5365 }
5366
5367 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5368+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
5369
5370 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
5371 unsigned long address)
5372diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5373index 40b2d7a..22a665b 100644
5374--- a/arch/sparc/include/asm/pgalloc_64.h
5375+++ b/arch/sparc/include/asm/pgalloc_64.h
5376@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5377 }
5378
5379 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5380+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5381
5382 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5383 {
5384diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5385index cbbbed5..97f72f9 100644
5386--- a/arch/sparc/include/asm/pgtable_32.h
5387+++ b/arch/sparc/include/asm/pgtable_32.h
5388@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
5389 #define PAGE_SHARED SRMMU_PAGE_SHARED
5390 #define PAGE_COPY SRMMU_PAGE_COPY
5391 #define PAGE_READONLY SRMMU_PAGE_RDONLY
5392+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
5393+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
5394+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
5395 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
5396
5397 /* Top-level page directory */
5398@@ -61,18 +64,18 @@ extern unsigned long ptr_in_current_pgd;
5399
5400 /* xwr */
5401 #define __P000 PAGE_NONE
5402-#define __P001 PAGE_READONLY
5403-#define __P010 PAGE_COPY
5404-#define __P011 PAGE_COPY
5405+#define __P001 PAGE_READONLY_NOEXEC
5406+#define __P010 PAGE_COPY_NOEXEC
5407+#define __P011 PAGE_COPY_NOEXEC
5408 #define __P100 PAGE_READONLY
5409 #define __P101 PAGE_READONLY
5410 #define __P110 PAGE_COPY
5411 #define __P111 PAGE_COPY
5412
5413 #define __S000 PAGE_NONE
5414-#define __S001 PAGE_READONLY
5415-#define __S010 PAGE_SHARED
5416-#define __S011 PAGE_SHARED
5417+#define __S001 PAGE_READONLY_NOEXEC
5418+#define __S010 PAGE_SHARED_NOEXEC
5419+#define __S011 PAGE_SHARED_NOEXEC
5420 #define __S100 PAGE_READONLY
5421 #define __S101 PAGE_READONLY
5422 #define __S110 PAGE_SHARED
5423diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5424index 79da178..c2eede8 100644
5425--- a/arch/sparc/include/asm/pgtsrmmu.h
5426+++ b/arch/sparc/include/asm/pgtsrmmu.h
5427@@ -115,6 +115,11 @@
5428 SRMMU_EXEC | SRMMU_REF)
5429 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5430 SRMMU_EXEC | SRMMU_REF)
5431+
5432+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5433+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5434+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5435+
5436 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5437 SRMMU_DIRTY | SRMMU_REF)
5438
5439diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5440index 9689176..63c18ea 100644
5441--- a/arch/sparc/include/asm/spinlock_64.h
5442+++ b/arch/sparc/include/asm/spinlock_64.h
5443@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5444
5445 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5446
5447-static void inline arch_read_lock(arch_rwlock_t *lock)
5448+static inline void arch_read_lock(arch_rwlock_t *lock)
5449 {
5450 unsigned long tmp1, tmp2;
5451
5452 __asm__ __volatile__ (
5453 "1: ldsw [%2], %0\n"
5454 " brlz,pn %0, 2f\n"
5455-"4: add %0, 1, %1\n"
5456+"4: addcc %0, 1, %1\n"
5457+
5458+#ifdef CONFIG_PAX_REFCOUNT
5459+" tvs %%icc, 6\n"
5460+#endif
5461+
5462 " cas [%2], %0, %1\n"
5463 " cmp %0, %1\n"
5464 " bne,pn %%icc, 1b\n"
5465@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5466 " .previous"
5467 : "=&r" (tmp1), "=&r" (tmp2)
5468 : "r" (lock)
5469- : "memory");
5470+ : "memory", "cc");
5471 }
5472
5473-static int inline arch_read_trylock(arch_rwlock_t *lock)
5474+static inline int arch_read_trylock(arch_rwlock_t *lock)
5475 {
5476 int tmp1, tmp2;
5477
5478@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5479 "1: ldsw [%2], %0\n"
5480 " brlz,a,pn %0, 2f\n"
5481 " mov 0, %0\n"
5482-" add %0, 1, %1\n"
5483+" addcc %0, 1, %1\n"
5484+
5485+#ifdef CONFIG_PAX_REFCOUNT
5486+" tvs %%icc, 6\n"
5487+#endif
5488+
5489 " cas [%2], %0, %1\n"
5490 " cmp %0, %1\n"
5491 " bne,pn %%icc, 1b\n"
5492@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5493 return tmp1;
5494 }
5495
5496-static void inline arch_read_unlock(arch_rwlock_t *lock)
5497+static inline void arch_read_unlock(arch_rwlock_t *lock)
5498 {
5499 unsigned long tmp1, tmp2;
5500
5501 __asm__ __volatile__(
5502 "1: lduw [%2], %0\n"
5503-" sub %0, 1, %1\n"
5504+" subcc %0, 1, %1\n"
5505+
5506+#ifdef CONFIG_PAX_REFCOUNT
5507+" tvs %%icc, 6\n"
5508+#endif
5509+
5510 " cas [%2], %0, %1\n"
5511 " cmp %0, %1\n"
5512 " bne,pn %%xcc, 1b\n"
5513@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5514 : "memory");
5515 }
5516
5517-static void inline arch_write_lock(arch_rwlock_t *lock)
5518+static inline void arch_write_lock(arch_rwlock_t *lock)
5519 {
5520 unsigned long mask, tmp1, tmp2;
5521
5522@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5523 : "memory");
5524 }
5525
5526-static void inline arch_write_unlock(arch_rwlock_t *lock)
5527+static inline void arch_write_unlock(arch_rwlock_t *lock)
5528 {
5529 __asm__ __volatile__(
5530 " stw %%g0, [%0]"
5531@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5532 : "memory");
5533 }
5534
5535-static int inline arch_write_trylock(arch_rwlock_t *lock)
5536+static inline int arch_write_trylock(arch_rwlock_t *lock)
5537 {
5538 unsigned long mask, tmp1, tmp2, result;
5539
5540diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5541index e6cd224..3a71793 100644
5542--- a/arch/sparc/include/asm/thread_info_32.h
5543+++ b/arch/sparc/include/asm/thread_info_32.h
5544@@ -49,6 +49,8 @@ struct thread_info {
5545 unsigned long w_saved;
5546
5547 struct restart_block restart_block;
5548+
5549+ unsigned long lowest_stack;
5550 };
5551
5552 /*
5553diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5554index cfa8c38..13f30d3 100644
5555--- a/arch/sparc/include/asm/thread_info_64.h
5556+++ b/arch/sparc/include/asm/thread_info_64.h
5557@@ -63,6 +63,8 @@ struct thread_info {
5558 struct pt_regs *kern_una_regs;
5559 unsigned int kern_una_insn;
5560
5561+ unsigned long lowest_stack;
5562+
5563 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5564 };
5565
5566@@ -193,10 +195,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5567 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5568 /* flag bit 6 is available */
5569 #define TIF_32BIT 7 /* 32-bit binary */
5570-/* flag bit 8 is available */
5571+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5572 #define TIF_SECCOMP 9 /* secure computing */
5573 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5574 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5575+
5576 /* NOTE: Thread flags >= 12 should be ones we have no interest
5577 * in using in assembly, else we can't use the mask as
5578 * an immediate value in instructions such as andcc.
5579@@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5580 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5581 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5582 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5583+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5584
5585 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5586 _TIF_DO_NOTIFY_RESUME_MASK | \
5587 _TIF_NEED_RESCHED)
5588 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5589
5590+#define _TIF_WORK_SYSCALL \
5591+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5592+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5593+
5594+
5595 /*
5596 * Thread-synchronous status.
5597 *
5598diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5599index 0167d26..9acd8ed 100644
5600--- a/arch/sparc/include/asm/uaccess.h
5601+++ b/arch/sparc/include/asm/uaccess.h
5602@@ -1,5 +1,13 @@
5603 #ifndef ___ASM_SPARC_UACCESS_H
5604 #define ___ASM_SPARC_UACCESS_H
5605+
5606+#ifdef __KERNEL__
5607+#ifndef __ASSEMBLY__
5608+#include <linux/types.h>
5609+extern void check_object_size(const void *ptr, unsigned long n, bool to);
5610+#endif
5611+#endif
5612+
5613 #if defined(__sparc__) && defined(__arch64__)
5614 #include <asm/uaccess_64.h>
5615 #else
5616diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5617index 53a28dd..50c38c3 100644
5618--- a/arch/sparc/include/asm/uaccess_32.h
5619+++ b/arch/sparc/include/asm/uaccess_32.h
5620@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5621
5622 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5623 {
5624- if (n && __access_ok((unsigned long) to, n))
5625+ if ((long)n < 0)
5626+ return n;
5627+
5628+ if (n && __access_ok((unsigned long) to, n)) {
5629+ if (!__builtin_constant_p(n))
5630+ check_object_size(from, n, true);
5631 return __copy_user(to, (__force void __user *) from, n);
5632- else
5633+ } else
5634 return n;
5635 }
5636
5637 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5638 {
5639+ if ((long)n < 0)
5640+ return n;
5641+
5642+ if (!__builtin_constant_p(n))
5643+ check_object_size(from, n, true);
5644+
5645 return __copy_user(to, (__force void __user *) from, n);
5646 }
5647
5648 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5649 {
5650- if (n && __access_ok((unsigned long) from, n))
5651+ if ((long)n < 0)
5652+ return n;
5653+
5654+ if (n && __access_ok((unsigned long) from, n)) {
5655+ if (!__builtin_constant_p(n))
5656+ check_object_size(to, n, false);
5657 return __copy_user((__force void __user *) to, from, n);
5658- else
5659+ } else
5660 return n;
5661 }
5662
5663 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5664 {
5665+ if ((long)n < 0)
5666+ return n;
5667+
5668 return __copy_user((__force void __user *) to, from, n);
5669 }
5670
5671diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5672index 7c831d8..d440ca7 100644
5673--- a/arch/sparc/include/asm/uaccess_64.h
5674+++ b/arch/sparc/include/asm/uaccess_64.h
5675@@ -10,6 +10,7 @@
5676 #include <linux/compiler.h>
5677 #include <linux/string.h>
5678 #include <linux/thread_info.h>
5679+#include <linux/kernel.h>
5680 #include <asm/asi.h>
5681 #include <asm/spitfire.h>
5682 #include <asm-generic/uaccess-unaligned.h>
5683@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5684 static inline unsigned long __must_check
5685 copy_from_user(void *to, const void __user *from, unsigned long size)
5686 {
5687- unsigned long ret = ___copy_from_user(to, from, size);
5688+ unsigned long ret;
5689
5690+ if ((long)size < 0 || size > INT_MAX)
5691+ return size;
5692+
5693+ if (!__builtin_constant_p(size))
5694+ check_object_size(to, size, false);
5695+
5696+ ret = ___copy_from_user(to, from, size);
5697 if (unlikely(ret))
5698 ret = copy_from_user_fixup(to, from, size);
5699
5700@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5701 static inline unsigned long __must_check
5702 copy_to_user(void __user *to, const void *from, unsigned long size)
5703 {
5704- unsigned long ret = ___copy_to_user(to, from, size);
5705+ unsigned long ret;
5706
5707+ if ((long)size < 0 || size > INT_MAX)
5708+ return size;
5709+
5710+ if (!__builtin_constant_p(size))
5711+ check_object_size(from, size, true);
5712+
5713+ ret = ___copy_to_user(to, from, size);
5714 if (unlikely(ret))
5715 ret = copy_to_user_fixup(to, from, size);
5716 return ret;
5717diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5718index 6cf591b..b49e65a 100644
5719--- a/arch/sparc/kernel/Makefile
5720+++ b/arch/sparc/kernel/Makefile
5721@@ -3,7 +3,7 @@
5722 #
5723
5724 asflags-y := -ansi
5725-ccflags-y := -Werror
5726+#ccflags-y := -Werror
5727
5728 extra-y := head_$(BITS).o
5729
5730diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5731index cb36e82..1c1462f 100644
5732--- a/arch/sparc/kernel/process_32.c
5733+++ b/arch/sparc/kernel/process_32.c
5734@@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
5735
5736 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5737 r->psr, r->pc, r->npc, r->y, print_tainted());
5738- printk("PC: <%pS>\n", (void *) r->pc);
5739+ printk("PC: <%pA>\n", (void *) r->pc);
5740 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5741 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5742 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5743 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5744 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5745 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5746- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5747+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5748
5749 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5750 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5751@@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5752 rw = (struct reg_window32 *) fp;
5753 pc = rw->ins[7];
5754 printk("[%08lx : ", pc);
5755- printk("%pS ] ", (void *) pc);
5756+ printk("%pA ] ", (void *) pc);
5757 fp = rw->ins[6];
5758 } while (++count < 16);
5759 printk("\n");
5760diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5761index aff0c72..9067b39 100644
5762--- a/arch/sparc/kernel/process_64.c
5763+++ b/arch/sparc/kernel/process_64.c
5764@@ -179,14 +179,14 @@ static void show_regwindow(struct pt_regs *regs)
5765 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5766 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5767 if (regs->tstate & TSTATE_PRIV)
5768- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5769+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5770 }
5771
5772 void show_regs(struct pt_regs *regs)
5773 {
5774 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5775 regs->tpc, regs->tnpc, regs->y, print_tainted());
5776- printk("TPC: <%pS>\n", (void *) regs->tpc);
5777+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5778 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5779 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5780 regs->u_regs[3]);
5781@@ -199,7 +199,7 @@ void show_regs(struct pt_regs *regs)
5782 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5783 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5784 regs->u_regs[15]);
5785- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5786+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5787 show_regwindow(regs);
5788 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5789 }
5790@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5791 ((tp && tp->task) ? tp->task->pid : -1));
5792
5793 if (gp->tstate & TSTATE_PRIV) {
5794- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5795+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5796 (void *) gp->tpc,
5797 (void *) gp->o7,
5798 (void *) gp->i7,
5799diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
5800index 484daba..0674139 100644
5801--- a/arch/sparc/kernel/ptrace_64.c
5802+++ b/arch/sparc/kernel/ptrace_64.c
5803@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
5804 return ret;
5805 }
5806
5807+#ifdef CONFIG_GRKERNSEC_SETXID
5808+extern void gr_delayed_cred_worker(void);
5809+#endif
5810+
5811 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5812 {
5813 int ret = 0;
5814@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5815 /* do the secure computing check first */
5816 secure_computing_strict(regs->u_regs[UREG_G1]);
5817
5818+#ifdef CONFIG_GRKERNSEC_SETXID
5819+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5820+ gr_delayed_cred_worker();
5821+#endif
5822+
5823 if (test_thread_flag(TIF_SYSCALL_TRACE))
5824 ret = tracehook_report_syscall_entry(regs);
5825
5826@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5827
5828 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5829 {
5830+#ifdef CONFIG_GRKERNSEC_SETXID
5831+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5832+ gr_delayed_cred_worker();
5833+#endif
5834+
5835 audit_syscall_exit(regs);
5836
5837 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
5838diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5839index 0c9b31b..7cb7aee 100644
5840--- a/arch/sparc/kernel/sys_sparc_32.c
5841+++ b/arch/sparc/kernel/sys_sparc_32.c
5842@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5843 if (len > TASK_SIZE - PAGE_SIZE)
5844 return -ENOMEM;
5845 if (!addr)
5846- addr = TASK_UNMAPPED_BASE;
5847+ addr = current->mm->mmap_base;
5848
5849 if (flags & MAP_SHARED)
5850 addr = COLOUR_ALIGN(addr);
5851@@ -65,7 +65,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5852 /* At this point: (!vmm || addr < vmm->vm_end). */
5853 if (TASK_SIZE - PAGE_SIZE - len < addr)
5854 return -ENOMEM;
5855- if (!vmm || addr + len <= vmm->vm_start)
5856+ if (check_heap_stack_gap(vmm, addr, len))
5857 return addr;
5858 addr = vmm->vm_end;
5859 if (flags & MAP_SHARED)
5860diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5861index 275f74f..81bf5b8 100644
5862--- a/arch/sparc/kernel/sys_sparc_64.c
5863+++ b/arch/sparc/kernel/sys_sparc_64.c
5864@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5865 /* We do not accept a shared mapping if it would violate
5866 * cache aliasing constraints.
5867 */
5868- if ((flags & MAP_SHARED) &&
5869+ if ((filp || (flags & MAP_SHARED)) &&
5870 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5871 return -EINVAL;
5872 return addr;
5873@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5874 if (filp || (flags & MAP_SHARED))
5875 do_color_align = 1;
5876
5877+#ifdef CONFIG_PAX_RANDMMAP
5878+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5879+#endif
5880+
5881 if (addr) {
5882 if (do_color_align)
5883 addr = COLOUR_ALIGN(addr, pgoff);
5884@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5885 addr = PAGE_ALIGN(addr);
5886
5887 vma = find_vma(mm, addr);
5888- if (task_size - len >= addr &&
5889- (!vma || addr + len <= vma->vm_start))
5890+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5891 return addr;
5892 }
5893
5894 if (len > mm->cached_hole_size) {
5895- start_addr = addr = mm->free_area_cache;
5896+ start_addr = addr = mm->free_area_cache;
5897 } else {
5898- start_addr = addr = TASK_UNMAPPED_BASE;
5899+ start_addr = addr = mm->mmap_base;
5900 mm->cached_hole_size = 0;
5901 }
5902
5903@@ -174,14 +177,14 @@ full_search:
5904 vma = find_vma(mm, VA_EXCLUDE_END);
5905 }
5906 if (unlikely(task_size < addr)) {
5907- if (start_addr != TASK_UNMAPPED_BASE) {
5908- start_addr = addr = TASK_UNMAPPED_BASE;
5909+ if (start_addr != mm->mmap_base) {
5910+ start_addr = addr = mm->mmap_base;
5911 mm->cached_hole_size = 0;
5912 goto full_search;
5913 }
5914 return -ENOMEM;
5915 }
5916- if (likely(!vma || addr + len <= vma->vm_start)) {
5917+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5918 /*
5919 * Remember the place where we stopped the search:
5920 */
5921@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5922 /* We do not accept a shared mapping if it would violate
5923 * cache aliasing constraints.
5924 */
5925- if ((flags & MAP_SHARED) &&
5926+ if ((filp || (flags & MAP_SHARED)) &&
5927 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5928 return -EINVAL;
5929 return addr;
5930@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5931 addr = PAGE_ALIGN(addr);
5932
5933 vma = find_vma(mm, addr);
5934- if (task_size - len >= addr &&
5935- (!vma || addr + len <= vma->vm_start))
5936+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5937 return addr;
5938 }
5939
5940@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5941 /* make sure it can fit in the remaining address space */
5942 if (likely(addr > len)) {
5943 vma = find_vma(mm, addr-len);
5944- if (!vma || addr <= vma->vm_start) {
5945+ if (check_heap_stack_gap(vma, addr - len, len)) {
5946 /* remember the address as a hint for next time */
5947 return (mm->free_area_cache = addr-len);
5948 }
5949@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5950 if (unlikely(mm->mmap_base < len))
5951 goto bottomup;
5952
5953- addr = mm->mmap_base-len;
5954- if (do_color_align)
5955- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5956+ addr = mm->mmap_base - len;
5957
5958 do {
5959+ if (do_color_align)
5960+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5961 /*
5962 * Lookup failure means no vma is above this address,
5963 * else if new region fits below vma->vm_start,
5964 * return with success:
5965 */
5966 vma = find_vma(mm, addr);
5967- if (likely(!vma || addr+len <= vma->vm_start)) {
5968+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5969 /* remember the address as a hint for next time */
5970 return (mm->free_area_cache = addr);
5971 }
5972@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5973 mm->cached_hole_size = vma->vm_start - addr;
5974
5975 /* try just below the current vma->vm_start */
5976- addr = vma->vm_start-len;
5977- if (do_color_align)
5978- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5979- } while (likely(len < vma->vm_start));
5980+ addr = skip_heap_stack_gap(vma, len);
5981+ } while (!IS_ERR_VALUE(addr));
5982
5983 bottomup:
5984 /*
5985@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5986 gap == RLIM_INFINITY ||
5987 sysctl_legacy_va_layout) {
5988 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5989+
5990+#ifdef CONFIG_PAX_RANDMMAP
5991+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5992+ mm->mmap_base += mm->delta_mmap;
5993+#endif
5994+
5995 mm->get_unmapped_area = arch_get_unmapped_area;
5996 mm->unmap_area = arch_unmap_area;
5997 } else {
5998@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5999 gap = (task_size / 6 * 5);
6000
6001 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
6002+
6003+#ifdef CONFIG_PAX_RANDMMAP
6004+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6005+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6006+#endif
6007+
6008 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6009 mm->unmap_area = arch_unmap_area_topdown;
6010 }
6011diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
6012index 1d7e274..b39c527 100644
6013--- a/arch/sparc/kernel/syscalls.S
6014+++ b/arch/sparc/kernel/syscalls.S
6015@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
6016 #endif
6017 .align 32
6018 1: ldx [%g6 + TI_FLAGS], %l5
6019- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6020+ andcc %l5, _TIF_WORK_SYSCALL, %g0
6021 be,pt %icc, rtrap
6022 nop
6023 call syscall_trace_leave
6024@@ -179,7 +179,7 @@ linux_sparc_syscall32:
6025
6026 srl %i5, 0, %o5 ! IEU1
6027 srl %i2, 0, %o2 ! IEU0 Group
6028- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6029+ andcc %l0, _TIF_WORK_SYSCALL, %g0
6030 bne,pn %icc, linux_syscall_trace32 ! CTI
6031 mov %i0, %l5 ! IEU1
6032 call %l7 ! CTI Group brk forced
6033@@ -202,7 +202,7 @@ linux_sparc_syscall:
6034
6035 mov %i3, %o3 ! IEU1
6036 mov %i4, %o4 ! IEU0 Group
6037- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6038+ andcc %l0, _TIF_WORK_SYSCALL, %g0
6039 bne,pn %icc, linux_syscall_trace ! CTI Group
6040 mov %i0, %l5 ! IEU0
6041 2: call %l7 ! CTI Group brk forced
6042@@ -226,7 +226,7 @@ ret_sys_call:
6043
6044 cmp %o0, -ERESTART_RESTARTBLOCK
6045 bgeu,pn %xcc, 1f
6046- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6047+ andcc %l0, _TIF_WORK_SYSCALL, %l6
6048 80:
6049 /* System call success, clear Carry condition code. */
6050 andn %g3, %g2, %g3
6051@@ -241,7 +241,7 @@ ret_sys_call:
6052 /* System call failure, set Carry condition code.
6053 * Also, get abs(errno) to return to the process.
6054 */
6055- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6056+ andcc %l0, _TIF_WORK_SYSCALL, %l6
6057 sub %g0, %o0, %o0
6058 or %g3, %g2, %g3
6059 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
6060diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6061index a5785ea..405c5f7 100644
6062--- a/arch/sparc/kernel/traps_32.c
6063+++ b/arch/sparc/kernel/traps_32.c
6064@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6065 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6066 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6067
6068+extern void gr_handle_kernel_exploit(void);
6069+
6070 void die_if_kernel(char *str, struct pt_regs *regs)
6071 {
6072 static int die_counter;
6073@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6074 count++ < 30 &&
6075 (((unsigned long) rw) >= PAGE_OFFSET) &&
6076 !(((unsigned long) rw) & 0x7)) {
6077- printk("Caller[%08lx]: %pS\n", rw->ins[7],
6078+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
6079 (void *) rw->ins[7]);
6080 rw = (struct reg_window32 *)rw->ins[6];
6081 }
6082 }
6083 printk("Instruction DUMP:");
6084 instruction_dump ((unsigned long *) regs->pc);
6085- if(regs->psr & PSR_PS)
6086+ if(regs->psr & PSR_PS) {
6087+ gr_handle_kernel_exploit();
6088 do_exit(SIGKILL);
6089+ }
6090 do_exit(SIGSEGV);
6091 }
6092
6093diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6094index 3b05e66..6ea2917 100644
6095--- a/arch/sparc/kernel/traps_64.c
6096+++ b/arch/sparc/kernel/traps_64.c
6097@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6098 i + 1,
6099 p->trapstack[i].tstate, p->trapstack[i].tpc,
6100 p->trapstack[i].tnpc, p->trapstack[i].tt);
6101- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6102+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6103 }
6104 }
6105
6106@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6107
6108 lvl -= 0x100;
6109 if (regs->tstate & TSTATE_PRIV) {
6110+
6111+#ifdef CONFIG_PAX_REFCOUNT
6112+ if (lvl == 6)
6113+ pax_report_refcount_overflow(regs);
6114+#endif
6115+
6116 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6117 die_if_kernel(buffer, regs);
6118 }
6119@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6120 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6121 {
6122 char buffer[32];
6123-
6124+
6125 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6126 0, lvl, SIGTRAP) == NOTIFY_STOP)
6127 return;
6128
6129+#ifdef CONFIG_PAX_REFCOUNT
6130+ if (lvl == 6)
6131+ pax_report_refcount_overflow(regs);
6132+#endif
6133+
6134 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6135
6136 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6137@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6138 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6139 printk("%s" "ERROR(%d): ",
6140 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6141- printk("TPC<%pS>\n", (void *) regs->tpc);
6142+ printk("TPC<%pA>\n", (void *) regs->tpc);
6143 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6144 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6145 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6146@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6147 smp_processor_id(),
6148 (type & 0x1) ? 'I' : 'D',
6149 regs->tpc);
6150- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6151+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6152 panic("Irrecoverable Cheetah+ parity error.");
6153 }
6154
6155@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6156 smp_processor_id(),
6157 (type & 0x1) ? 'I' : 'D',
6158 regs->tpc);
6159- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6160+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6161 }
6162
6163 struct sun4v_error_entry {
6164@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6165
6166 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6167 regs->tpc, tl);
6168- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6169+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6170 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6171- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6172+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6173 (void *) regs->u_regs[UREG_I7]);
6174 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6175 "pte[%lx] error[%lx]\n",
6176@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6177
6178 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6179 regs->tpc, tl);
6180- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6181+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6182 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6183- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6184+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6185 (void *) regs->u_regs[UREG_I7]);
6186 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6187 "pte[%lx] error[%lx]\n",
6188@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6189 fp = (unsigned long)sf->fp + STACK_BIAS;
6190 }
6191
6192- printk(" [%016lx] %pS\n", pc, (void *) pc);
6193+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6194 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6195 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6196 int index = tsk->curr_ret_stack;
6197 if (tsk->ret_stack && index >= graph) {
6198 pc = tsk->ret_stack[index - graph].ret;
6199- printk(" [%016lx] %pS\n", pc, (void *) pc);
6200+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6201 graph++;
6202 }
6203 }
6204@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6205 return (struct reg_window *) (fp + STACK_BIAS);
6206 }
6207
6208+extern void gr_handle_kernel_exploit(void);
6209+
6210 void die_if_kernel(char *str, struct pt_regs *regs)
6211 {
6212 static int die_counter;
6213@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6214 while (rw &&
6215 count++ < 30 &&
6216 kstack_valid(tp, (unsigned long) rw)) {
6217- printk("Caller[%016lx]: %pS\n", rw->ins[7],
6218+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
6219 (void *) rw->ins[7]);
6220
6221 rw = kernel_stack_up(rw);
6222@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6223 }
6224 user_instruction_dump ((unsigned int __user *) regs->tpc);
6225 }
6226- if (regs->tstate & TSTATE_PRIV)
6227+ if (regs->tstate & TSTATE_PRIV) {
6228+ gr_handle_kernel_exploit();
6229 do_exit(SIGKILL);
6230+ }
6231 do_exit(SIGSEGV);
6232 }
6233 EXPORT_SYMBOL(die_if_kernel);
6234diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6235index f81d038..e7a4680 100644
6236--- a/arch/sparc/kernel/unaligned_64.c
6237+++ b/arch/sparc/kernel/unaligned_64.c
6238@@ -278,7 +278,7 @@ static void log_unaligned(struct pt_regs *regs)
6239 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6240
6241 if (__ratelimit(&ratelimit)) {
6242- printk("Kernel unaligned access at TPC[%lx] %pS\n",
6243+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
6244 regs->tpc, (void *) regs->tpc);
6245 }
6246 }
6247diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6248index dff4096..bd9a388 100644
6249--- a/arch/sparc/lib/Makefile
6250+++ b/arch/sparc/lib/Makefile
6251@@ -2,7 +2,7 @@
6252 #
6253
6254 asflags-y := -ansi -DST_DIV0=0x02
6255-ccflags-y := -Werror
6256+#ccflags-y := -Werror
6257
6258 lib-$(CONFIG_SPARC32) += ashrdi3.o
6259 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6260diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6261index 4d502da..527c48d 100644
6262--- a/arch/sparc/lib/atomic_64.S
6263+++ b/arch/sparc/lib/atomic_64.S
6264@@ -17,7 +17,12 @@
6265 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
6266 BACKOFF_SETUP(%o2)
6267 1: lduw [%o1], %g1
6268- add %g1, %o0, %g7
6269+ addcc %g1, %o0, %g7
6270+
6271+#ifdef CONFIG_PAX_REFCOUNT
6272+ tvs %icc, 6
6273+#endif
6274+
6275 cas [%o1], %g1, %g7
6276 cmp %g1, %g7
6277 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6278@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
6279 2: BACKOFF_SPIN(%o2, %o3, 1b)
6280 ENDPROC(atomic_add)
6281
6282+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
6283+ BACKOFF_SETUP(%o2)
6284+1: lduw [%o1], %g1
6285+ add %g1, %o0, %g7
6286+ cas [%o1], %g1, %g7
6287+ cmp %g1, %g7
6288+ bne,pn %icc, 2f
6289+ nop
6290+ retl
6291+ nop
6292+2: BACKOFF_SPIN(%o2, %o3, 1b)
6293+ENDPROC(atomic_add_unchecked)
6294+
6295 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
6296 BACKOFF_SETUP(%o2)
6297 1: lduw [%o1], %g1
6298- sub %g1, %o0, %g7
6299+ subcc %g1, %o0, %g7
6300+
6301+#ifdef CONFIG_PAX_REFCOUNT
6302+ tvs %icc, 6
6303+#endif
6304+
6305 cas [%o1], %g1, %g7
6306 cmp %g1, %g7
6307 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6308@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
6309 2: BACKOFF_SPIN(%o2, %o3, 1b)
6310 ENDPROC(atomic_sub)
6311
6312+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
6313+ BACKOFF_SETUP(%o2)
6314+1: lduw [%o1], %g1
6315+ sub %g1, %o0, %g7
6316+ cas [%o1], %g1, %g7
6317+ cmp %g1, %g7
6318+ bne,pn %icc, 2f
6319+ nop
6320+ retl
6321+ nop
6322+2: BACKOFF_SPIN(%o2, %o3, 1b)
6323+ENDPROC(atomic_sub_unchecked)
6324+
6325 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
6326 BACKOFF_SETUP(%o2)
6327 1: lduw [%o1], %g1
6328- add %g1, %o0, %g7
6329+ addcc %g1, %o0, %g7
6330+
6331+#ifdef CONFIG_PAX_REFCOUNT
6332+ tvs %icc, 6
6333+#endif
6334+
6335 cas [%o1], %g1, %g7
6336 cmp %g1, %g7
6337 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6338@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
6339 2: BACKOFF_SPIN(%o2, %o3, 1b)
6340 ENDPROC(atomic_add_ret)
6341
6342+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
6343+ BACKOFF_SETUP(%o2)
6344+1: lduw [%o1], %g1
6345+ addcc %g1, %o0, %g7
6346+ cas [%o1], %g1, %g7
6347+ cmp %g1, %g7
6348+ bne,pn %icc, 2f
6349+ add %g7, %o0, %g7
6350+ sra %g7, 0, %o0
6351+ retl
6352+ nop
6353+2: BACKOFF_SPIN(%o2, %o3, 1b)
6354+ENDPROC(atomic_add_ret_unchecked)
6355+
6356 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
6357 BACKOFF_SETUP(%o2)
6358 1: lduw [%o1], %g1
6359- sub %g1, %o0, %g7
6360+ subcc %g1, %o0, %g7
6361+
6362+#ifdef CONFIG_PAX_REFCOUNT
6363+ tvs %icc, 6
6364+#endif
6365+
6366 cas [%o1], %g1, %g7
6367 cmp %g1, %g7
6368 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6369@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
6370 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
6371 BACKOFF_SETUP(%o2)
6372 1: ldx [%o1], %g1
6373- add %g1, %o0, %g7
6374+ addcc %g1, %o0, %g7
6375+
6376+#ifdef CONFIG_PAX_REFCOUNT
6377+ tvs %xcc, 6
6378+#endif
6379+
6380 casx [%o1], %g1, %g7
6381 cmp %g1, %g7
6382 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6383@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
6384 2: BACKOFF_SPIN(%o2, %o3, 1b)
6385 ENDPROC(atomic64_add)
6386
6387+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
6388+ BACKOFF_SETUP(%o2)
6389+1: ldx [%o1], %g1
6390+ addcc %g1, %o0, %g7
6391+ casx [%o1], %g1, %g7
6392+ cmp %g1, %g7
6393+ bne,pn %xcc, 2f
6394+ nop
6395+ retl
6396+ nop
6397+2: BACKOFF_SPIN(%o2, %o3, 1b)
6398+ENDPROC(atomic64_add_unchecked)
6399+
6400 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
6401 BACKOFF_SETUP(%o2)
6402 1: ldx [%o1], %g1
6403- sub %g1, %o0, %g7
6404+ subcc %g1, %o0, %g7
6405+
6406+#ifdef CONFIG_PAX_REFCOUNT
6407+ tvs %xcc, 6
6408+#endif
6409+
6410 casx [%o1], %g1, %g7
6411 cmp %g1, %g7
6412 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6413@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
6414 2: BACKOFF_SPIN(%o2, %o3, 1b)
6415 ENDPROC(atomic64_sub)
6416
6417+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
6418+ BACKOFF_SETUP(%o2)
6419+1: ldx [%o1], %g1
6420+ subcc %g1, %o0, %g7
6421+ casx [%o1], %g1, %g7
6422+ cmp %g1, %g7
6423+ bne,pn %xcc, 2f
6424+ nop
6425+ retl
6426+ nop
6427+2: BACKOFF_SPIN(%o2, %o3, 1b)
6428+ENDPROC(atomic64_sub_unchecked)
6429+
6430 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
6431 BACKOFF_SETUP(%o2)
6432 1: ldx [%o1], %g1
6433- add %g1, %o0, %g7
6434+ addcc %g1, %o0, %g7
6435+
6436+#ifdef CONFIG_PAX_REFCOUNT
6437+ tvs %xcc, 6
6438+#endif
6439+
6440 casx [%o1], %g1, %g7
6441 cmp %g1, %g7
6442 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6443@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
6444 2: BACKOFF_SPIN(%o2, %o3, 1b)
6445 ENDPROC(atomic64_add_ret)
6446
6447+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
6448+ BACKOFF_SETUP(%o2)
6449+1: ldx [%o1], %g1
6450+ addcc %g1, %o0, %g7
6451+ casx [%o1], %g1, %g7
6452+ cmp %g1, %g7
6453+ bne,pn %xcc, 2f
6454+ add %g7, %o0, %g7
6455+ mov %g7, %o0
6456+ retl
6457+ nop
6458+2: BACKOFF_SPIN(%o2, %o3, 1b)
6459+ENDPROC(atomic64_add_ret_unchecked)
6460+
6461 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
6462 BACKOFF_SETUP(%o2)
6463 1: ldx [%o1], %g1
6464- sub %g1, %o0, %g7
6465+ subcc %g1, %o0, %g7
6466+
6467+#ifdef CONFIG_PAX_REFCOUNT
6468+ tvs %xcc, 6
6469+#endif
6470+
6471 casx [%o1], %g1, %g7
6472 cmp %g1, %g7
6473 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6474diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6475index 3b31218..345c609 100644
6476--- a/arch/sparc/lib/ksyms.c
6477+++ b/arch/sparc/lib/ksyms.c
6478@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
6479
6480 /* Atomic counter implementation. */
6481 EXPORT_SYMBOL(atomic_add);
6482+EXPORT_SYMBOL(atomic_add_unchecked);
6483 EXPORT_SYMBOL(atomic_add_ret);
6484+EXPORT_SYMBOL(atomic_add_ret_unchecked);
6485 EXPORT_SYMBOL(atomic_sub);
6486+EXPORT_SYMBOL(atomic_sub_unchecked);
6487 EXPORT_SYMBOL(atomic_sub_ret);
6488 EXPORT_SYMBOL(atomic64_add);
6489+EXPORT_SYMBOL(atomic64_add_unchecked);
6490 EXPORT_SYMBOL(atomic64_add_ret);
6491+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6492 EXPORT_SYMBOL(atomic64_sub);
6493+EXPORT_SYMBOL(atomic64_sub_unchecked);
6494 EXPORT_SYMBOL(atomic64_sub_ret);
6495
6496 /* Atomic bit operations. */
6497diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6498index 30c3ecc..736f015 100644
6499--- a/arch/sparc/mm/Makefile
6500+++ b/arch/sparc/mm/Makefile
6501@@ -2,7 +2,7 @@
6502 #
6503
6504 asflags-y := -ansi
6505-ccflags-y := -Werror
6506+#ccflags-y := -Werror
6507
6508 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6509 obj-y += fault_$(BITS).o
6510diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6511index f46cf6b..7235ec9 100644
6512--- a/arch/sparc/mm/fault_32.c
6513+++ b/arch/sparc/mm/fault_32.c
6514@@ -21,6 +21,9 @@
6515 #include <linux/perf_event.h>
6516 #include <linux/interrupt.h>
6517 #include <linux/kdebug.h>
6518+#include <linux/slab.h>
6519+#include <linux/pagemap.h>
6520+#include <linux/compiler.h>
6521
6522 #include <asm/page.h>
6523 #include <asm/pgtable.h>
6524@@ -177,6 +180,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6525 return safe_compute_effective_address(regs, insn);
6526 }
6527
6528+#ifdef CONFIG_PAX_PAGEEXEC
6529+#ifdef CONFIG_PAX_DLRESOLVE
6530+static void pax_emuplt_close(struct vm_area_struct *vma)
6531+{
6532+ vma->vm_mm->call_dl_resolve = 0UL;
6533+}
6534+
6535+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6536+{
6537+ unsigned int *kaddr;
6538+
6539+ vmf->page = alloc_page(GFP_HIGHUSER);
6540+ if (!vmf->page)
6541+ return VM_FAULT_OOM;
6542+
6543+ kaddr = kmap(vmf->page);
6544+ memset(kaddr, 0, PAGE_SIZE);
6545+ kaddr[0] = 0x9DE3BFA8U; /* save */
6546+ flush_dcache_page(vmf->page);
6547+ kunmap(vmf->page);
6548+ return VM_FAULT_MAJOR;
6549+}
6550+
6551+static const struct vm_operations_struct pax_vm_ops = {
6552+ .close = pax_emuplt_close,
6553+ .fault = pax_emuplt_fault
6554+};
6555+
6556+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6557+{
6558+ int ret;
6559+
6560+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6561+ vma->vm_mm = current->mm;
6562+ vma->vm_start = addr;
6563+ vma->vm_end = addr + PAGE_SIZE;
6564+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6565+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6566+ vma->vm_ops = &pax_vm_ops;
6567+
6568+ ret = insert_vm_struct(current->mm, vma);
6569+ if (ret)
6570+ return ret;
6571+
6572+ ++current->mm->total_vm;
6573+ return 0;
6574+}
6575+#endif
6576+
6577+/*
6578+ * PaX: decide what to do with offenders (regs->pc = fault address)
6579+ *
6580+ * returns 1 when task should be killed
6581+ * 2 when patched PLT trampoline was detected
6582+ * 3 when unpatched PLT trampoline was detected
6583+ */
6584+static int pax_handle_fetch_fault(struct pt_regs *regs)
6585+{
6586+
6587+#ifdef CONFIG_PAX_EMUPLT
6588+ int err;
6589+
6590+ do { /* PaX: patched PLT emulation #1 */
6591+ unsigned int sethi1, sethi2, jmpl;
6592+
6593+ err = get_user(sethi1, (unsigned int *)regs->pc);
6594+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6595+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6596+
6597+ if (err)
6598+ break;
6599+
6600+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6601+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6602+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6603+ {
6604+ unsigned int addr;
6605+
6606+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6607+ addr = regs->u_regs[UREG_G1];
6608+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6609+ regs->pc = addr;
6610+ regs->npc = addr+4;
6611+ return 2;
6612+ }
6613+ } while (0);
6614+
6615+ do { /* PaX: patched PLT emulation #2 */
6616+ unsigned int ba;
6617+
6618+ err = get_user(ba, (unsigned int *)regs->pc);
6619+
6620+ if (err)
6621+ break;
6622+
6623+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
6624+ unsigned int addr;
6625+
6626+ if ((ba & 0xFFC00000U) == 0x30800000U)
6627+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6628+ else
6629+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6630+ regs->pc = addr;
6631+ regs->npc = addr+4;
6632+ return 2;
6633+ }
6634+ } while (0);
6635+
6636+ do { /* PaX: patched PLT emulation #3 */
6637+ unsigned int sethi, bajmpl, nop;
6638+
6639+ err = get_user(sethi, (unsigned int *)regs->pc);
6640+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
6641+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6642+
6643+ if (err)
6644+ break;
6645+
6646+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6647+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
6648+ nop == 0x01000000U)
6649+ {
6650+ unsigned int addr;
6651+
6652+ addr = (sethi & 0x003FFFFFU) << 10;
6653+ regs->u_regs[UREG_G1] = addr;
6654+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
6655+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6656+ else
6657+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6658+ regs->pc = addr;
6659+ regs->npc = addr+4;
6660+ return 2;
6661+ }
6662+ } while (0);
6663+
6664+ do { /* PaX: unpatched PLT emulation step 1 */
6665+ unsigned int sethi, ba, nop;
6666+
6667+ err = get_user(sethi, (unsigned int *)regs->pc);
6668+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
6669+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6670+
6671+ if (err)
6672+ break;
6673+
6674+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6675+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6676+ nop == 0x01000000U)
6677+ {
6678+ unsigned int addr, save, call;
6679+
6680+ if ((ba & 0xFFC00000U) == 0x30800000U)
6681+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6682+ else
6683+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6684+
6685+ err = get_user(save, (unsigned int *)addr);
6686+ err |= get_user(call, (unsigned int *)(addr+4));
6687+ err |= get_user(nop, (unsigned int *)(addr+8));
6688+ if (err)
6689+ break;
6690+
6691+#ifdef CONFIG_PAX_DLRESOLVE
6692+ if (save == 0x9DE3BFA8U &&
6693+ (call & 0xC0000000U) == 0x40000000U &&
6694+ nop == 0x01000000U)
6695+ {
6696+ struct vm_area_struct *vma;
6697+ unsigned long call_dl_resolve;
6698+
6699+ down_read(&current->mm->mmap_sem);
6700+ call_dl_resolve = current->mm->call_dl_resolve;
6701+ up_read(&current->mm->mmap_sem);
6702+ if (likely(call_dl_resolve))
6703+ goto emulate;
6704+
6705+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6706+
6707+ down_write(&current->mm->mmap_sem);
6708+ if (current->mm->call_dl_resolve) {
6709+ call_dl_resolve = current->mm->call_dl_resolve;
6710+ up_write(&current->mm->mmap_sem);
6711+ if (vma)
6712+ kmem_cache_free(vm_area_cachep, vma);
6713+ goto emulate;
6714+ }
6715+
6716+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6717+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6718+ up_write(&current->mm->mmap_sem);
6719+ if (vma)
6720+ kmem_cache_free(vm_area_cachep, vma);
6721+ return 1;
6722+ }
6723+
6724+ if (pax_insert_vma(vma, call_dl_resolve)) {
6725+ up_write(&current->mm->mmap_sem);
6726+ kmem_cache_free(vm_area_cachep, vma);
6727+ return 1;
6728+ }
6729+
6730+ current->mm->call_dl_resolve = call_dl_resolve;
6731+ up_write(&current->mm->mmap_sem);
6732+
6733+emulate:
6734+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6735+ regs->pc = call_dl_resolve;
6736+ regs->npc = addr+4;
6737+ return 3;
6738+ }
6739+#endif
6740+
6741+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6742+ if ((save & 0xFFC00000U) == 0x05000000U &&
6743+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6744+ nop == 0x01000000U)
6745+ {
6746+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6747+ regs->u_regs[UREG_G2] = addr + 4;
6748+ addr = (save & 0x003FFFFFU) << 10;
6749+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6750+ regs->pc = addr;
6751+ regs->npc = addr+4;
6752+ return 3;
6753+ }
6754+ }
6755+ } while (0);
6756+
6757+ do { /* PaX: unpatched PLT emulation step 2 */
6758+ unsigned int save, call, nop;
6759+
6760+ err = get_user(save, (unsigned int *)(regs->pc-4));
6761+ err |= get_user(call, (unsigned int *)regs->pc);
6762+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6763+ if (err)
6764+ break;
6765+
6766+ if (save == 0x9DE3BFA8U &&
6767+ (call & 0xC0000000U) == 0x40000000U &&
6768+ nop == 0x01000000U)
6769+ {
6770+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6771+
6772+ regs->u_regs[UREG_RETPC] = regs->pc;
6773+ regs->pc = dl_resolve;
6774+ regs->npc = dl_resolve+4;
6775+ return 3;
6776+ }
6777+ } while (0);
6778+#endif
6779+
6780+ return 1;
6781+}
6782+
6783+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6784+{
6785+ unsigned long i;
6786+
6787+ printk(KERN_ERR "PAX: bytes at PC: ");
6788+ for (i = 0; i < 8; i++) {
6789+ unsigned int c;
6790+ if (get_user(c, (unsigned int *)pc+i))
6791+ printk(KERN_CONT "???????? ");
6792+ else
6793+ printk(KERN_CONT "%08x ", c);
6794+ }
6795+ printk("\n");
6796+}
6797+#endif
6798+
6799 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6800 int text_fault)
6801 {
6802@@ -248,6 +522,24 @@ good_area:
6803 if (!(vma->vm_flags & VM_WRITE))
6804 goto bad_area;
6805 } else {
6806+
6807+#ifdef CONFIG_PAX_PAGEEXEC
6808+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6809+ up_read(&mm->mmap_sem);
6810+ switch (pax_handle_fetch_fault(regs)) {
6811+
6812+#ifdef CONFIG_PAX_EMUPLT
6813+ case 2:
6814+ case 3:
6815+ return;
6816+#endif
6817+
6818+ }
6819+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6820+ do_group_exit(SIGKILL);
6821+ }
6822+#endif
6823+
6824 /* Allow reads even for write-only mappings */
6825 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
6826 goto bad_area;
6827diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6828index 1fe0429..8dd5dd5 100644
6829--- a/arch/sparc/mm/fault_64.c
6830+++ b/arch/sparc/mm/fault_64.c
6831@@ -21,6 +21,9 @@
6832 #include <linux/kprobes.h>
6833 #include <linux/kdebug.h>
6834 #include <linux/percpu.h>
6835+#include <linux/slab.h>
6836+#include <linux/pagemap.h>
6837+#include <linux/compiler.h>
6838
6839 #include <asm/page.h>
6840 #include <asm/pgtable.h>
6841@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6842 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6843 regs->tpc);
6844 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6845- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6846+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6847 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6848 dump_stack();
6849 unhandled_fault(regs->tpc, current, regs);
6850@@ -272,6 +275,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6851 show_regs(regs);
6852 }
6853
6854+#ifdef CONFIG_PAX_PAGEEXEC
6855+#ifdef CONFIG_PAX_DLRESOLVE
6856+static void pax_emuplt_close(struct vm_area_struct *vma)
6857+{
6858+ vma->vm_mm->call_dl_resolve = 0UL;
6859+}
6860+
6861+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6862+{
6863+ unsigned int *kaddr;
6864+
6865+ vmf->page = alloc_page(GFP_HIGHUSER);
6866+ if (!vmf->page)
6867+ return VM_FAULT_OOM;
6868+
6869+ kaddr = kmap(vmf->page);
6870+ memset(kaddr, 0, PAGE_SIZE);
6871+ kaddr[0] = 0x9DE3BFA8U; /* save */
6872+ flush_dcache_page(vmf->page);
6873+ kunmap(vmf->page);
6874+ return VM_FAULT_MAJOR;
6875+}
6876+
6877+static const struct vm_operations_struct pax_vm_ops = {
6878+ .close = pax_emuplt_close,
6879+ .fault = pax_emuplt_fault
6880+};
6881+
6882+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6883+{
6884+ int ret;
6885+
6886+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6887+ vma->vm_mm = current->mm;
6888+ vma->vm_start = addr;
6889+ vma->vm_end = addr + PAGE_SIZE;
6890+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6891+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6892+ vma->vm_ops = &pax_vm_ops;
6893+
6894+ ret = insert_vm_struct(current->mm, vma);
6895+ if (ret)
6896+ return ret;
6897+
6898+ ++current->mm->total_vm;
6899+ return 0;
6900+}
6901+#endif
6902+
6903+/*
6904+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6905+ *
6906+ * returns 1 when task should be killed
6907+ * 2 when patched PLT trampoline was detected
6908+ * 3 when unpatched PLT trampoline was detected
6909+ */
6910+static int pax_handle_fetch_fault(struct pt_regs *regs)
6911+{
6912+
6913+#ifdef CONFIG_PAX_EMUPLT
6914+ int err;
6915+
6916+ do { /* PaX: patched PLT emulation #1 */
6917+ unsigned int sethi1, sethi2, jmpl;
6918+
6919+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6920+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6921+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6922+
6923+ if (err)
6924+ break;
6925+
6926+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6927+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6928+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6929+ {
6930+ unsigned long addr;
6931+
6932+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6933+ addr = regs->u_regs[UREG_G1];
6934+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6935+
6936+ if (test_thread_flag(TIF_32BIT))
6937+ addr &= 0xFFFFFFFFUL;
6938+
6939+ regs->tpc = addr;
6940+ regs->tnpc = addr+4;
6941+ return 2;
6942+ }
6943+ } while (0);
6944+
6945+ do { /* PaX: patched PLT emulation #2 */
6946+ unsigned int ba;
6947+
6948+ err = get_user(ba, (unsigned int *)regs->tpc);
6949+
6950+ if (err)
6951+ break;
6952+
6953+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
6954+ unsigned long addr;
6955+
6956+ if ((ba & 0xFFC00000U) == 0x30800000U)
6957+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6958+ else
6959+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6960+
6961+ if (test_thread_flag(TIF_32BIT))
6962+ addr &= 0xFFFFFFFFUL;
6963+
6964+ regs->tpc = addr;
6965+ regs->tnpc = addr+4;
6966+ return 2;
6967+ }
6968+ } while (0);
6969+
6970+ do { /* PaX: patched PLT emulation #3 */
6971+ unsigned int sethi, bajmpl, nop;
6972+
6973+ err = get_user(sethi, (unsigned int *)regs->tpc);
6974+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
6975+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6976+
6977+ if (err)
6978+ break;
6979+
6980+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6981+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
6982+ nop == 0x01000000U)
6983+ {
6984+ unsigned long addr;
6985+
6986+ addr = (sethi & 0x003FFFFFU) << 10;
6987+ regs->u_regs[UREG_G1] = addr;
6988+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
6989+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6990+ else
6991+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6992+
6993+ if (test_thread_flag(TIF_32BIT))
6994+ addr &= 0xFFFFFFFFUL;
6995+
6996+ regs->tpc = addr;
6997+ regs->tnpc = addr+4;
6998+ return 2;
6999+ }
7000+ } while (0);
7001+
7002+ do { /* PaX: patched PLT emulation #4 */
7003+ unsigned int sethi, mov1, call, mov2;
7004+
7005+ err = get_user(sethi, (unsigned int *)regs->tpc);
7006+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7007+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
7008+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7009+
7010+ if (err)
7011+ break;
7012+
7013+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7014+ mov1 == 0x8210000FU &&
7015+ (call & 0xC0000000U) == 0x40000000U &&
7016+ mov2 == 0x9E100001U)
7017+ {
7018+ unsigned long addr;
7019+
7020+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7021+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7022+
7023+ if (test_thread_flag(TIF_32BIT))
7024+ addr &= 0xFFFFFFFFUL;
7025+
7026+ regs->tpc = addr;
7027+ regs->tnpc = addr+4;
7028+ return 2;
7029+ }
7030+ } while (0);
7031+
7032+ do { /* PaX: patched PLT emulation #5 */
7033+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7034+
7035+ err = get_user(sethi, (unsigned int *)regs->tpc);
7036+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7037+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7038+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7039+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7040+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7041+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7042+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7043+
7044+ if (err)
7045+ break;
7046+
7047+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7048+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7049+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7050+ (or1 & 0xFFFFE000U) == 0x82106000U &&
7051+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7052+ sllx == 0x83287020U &&
7053+ jmpl == 0x81C04005U &&
7054+ nop == 0x01000000U)
7055+ {
7056+ unsigned long addr;
7057+
7058+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7059+ regs->u_regs[UREG_G1] <<= 32;
7060+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7061+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7062+ regs->tpc = addr;
7063+ regs->tnpc = addr+4;
7064+ return 2;
7065+ }
7066+ } while (0);
7067+
7068+ do { /* PaX: patched PLT emulation #6 */
7069+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7070+
7071+ err = get_user(sethi, (unsigned int *)regs->tpc);
7072+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7073+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7074+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7075+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
7076+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7077+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7078+
7079+ if (err)
7080+ break;
7081+
7082+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7083+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7084+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7085+ sllx == 0x83287020U &&
7086+ (or & 0xFFFFE000U) == 0x8A116000U &&
7087+ jmpl == 0x81C04005U &&
7088+ nop == 0x01000000U)
7089+ {
7090+ unsigned long addr;
7091+
7092+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7093+ regs->u_regs[UREG_G1] <<= 32;
7094+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7095+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7096+ regs->tpc = addr;
7097+ regs->tnpc = addr+4;
7098+ return 2;
7099+ }
7100+ } while (0);
7101+
7102+ do { /* PaX: unpatched PLT emulation step 1 */
7103+ unsigned int sethi, ba, nop;
7104+
7105+ err = get_user(sethi, (unsigned int *)regs->tpc);
7106+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7107+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7108+
7109+ if (err)
7110+ break;
7111+
7112+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7113+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7114+ nop == 0x01000000U)
7115+ {
7116+ unsigned long addr;
7117+ unsigned int save, call;
7118+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7119+
7120+ if ((ba & 0xFFC00000U) == 0x30800000U)
7121+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7122+ else
7123+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7124+
7125+ if (test_thread_flag(TIF_32BIT))
7126+ addr &= 0xFFFFFFFFUL;
7127+
7128+ err = get_user(save, (unsigned int *)addr);
7129+ err |= get_user(call, (unsigned int *)(addr+4));
7130+ err |= get_user(nop, (unsigned int *)(addr+8));
7131+ if (err)
7132+ break;
7133+
7134+#ifdef CONFIG_PAX_DLRESOLVE
7135+ if (save == 0x9DE3BFA8U &&
7136+ (call & 0xC0000000U) == 0x40000000U &&
7137+ nop == 0x01000000U)
7138+ {
7139+ struct vm_area_struct *vma;
7140+ unsigned long call_dl_resolve;
7141+
7142+ down_read(&current->mm->mmap_sem);
7143+ call_dl_resolve = current->mm->call_dl_resolve;
7144+ up_read(&current->mm->mmap_sem);
7145+ if (likely(call_dl_resolve))
7146+ goto emulate;
7147+
7148+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7149+
7150+ down_write(&current->mm->mmap_sem);
7151+ if (current->mm->call_dl_resolve) {
7152+ call_dl_resolve = current->mm->call_dl_resolve;
7153+ up_write(&current->mm->mmap_sem);
7154+ if (vma)
7155+ kmem_cache_free(vm_area_cachep, vma);
7156+ goto emulate;
7157+ }
7158+
7159+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7160+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7161+ up_write(&current->mm->mmap_sem);
7162+ if (vma)
7163+ kmem_cache_free(vm_area_cachep, vma);
7164+ return 1;
7165+ }
7166+
7167+ if (pax_insert_vma(vma, call_dl_resolve)) {
7168+ up_write(&current->mm->mmap_sem);
7169+ kmem_cache_free(vm_area_cachep, vma);
7170+ return 1;
7171+ }
7172+
7173+ current->mm->call_dl_resolve = call_dl_resolve;
7174+ up_write(&current->mm->mmap_sem);
7175+
7176+emulate:
7177+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7178+ regs->tpc = call_dl_resolve;
7179+ regs->tnpc = addr+4;
7180+ return 3;
7181+ }
7182+#endif
7183+
7184+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7185+ if ((save & 0xFFC00000U) == 0x05000000U &&
7186+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7187+ nop == 0x01000000U)
7188+ {
7189+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7190+ regs->u_regs[UREG_G2] = addr + 4;
7191+ addr = (save & 0x003FFFFFU) << 10;
7192+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7193+
7194+ if (test_thread_flag(TIF_32BIT))
7195+ addr &= 0xFFFFFFFFUL;
7196+
7197+ regs->tpc = addr;
7198+ regs->tnpc = addr+4;
7199+ return 3;
7200+ }
7201+
7202+ /* PaX: 64-bit PLT stub */
7203+ err = get_user(sethi1, (unsigned int *)addr);
7204+ err |= get_user(sethi2, (unsigned int *)(addr+4));
7205+ err |= get_user(or1, (unsigned int *)(addr+8));
7206+ err |= get_user(or2, (unsigned int *)(addr+12));
7207+ err |= get_user(sllx, (unsigned int *)(addr+16));
7208+ err |= get_user(add, (unsigned int *)(addr+20));
7209+ err |= get_user(jmpl, (unsigned int *)(addr+24));
7210+ err |= get_user(nop, (unsigned int *)(addr+28));
7211+ if (err)
7212+ break;
7213+
7214+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7215+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7216+ (or1 & 0xFFFFE000U) == 0x88112000U &&
7217+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7218+ sllx == 0x89293020U &&
7219+ add == 0x8A010005U &&
7220+ jmpl == 0x89C14000U &&
7221+ nop == 0x01000000U)
7222+ {
7223+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7224+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7225+ regs->u_regs[UREG_G4] <<= 32;
7226+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7227+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7228+ regs->u_regs[UREG_G4] = addr + 24;
7229+ addr = regs->u_regs[UREG_G5];
7230+ regs->tpc = addr;
7231+ regs->tnpc = addr+4;
7232+ return 3;
7233+ }
7234+ }
7235+ } while (0);
7236+
7237+#ifdef CONFIG_PAX_DLRESOLVE
7238+ do { /* PaX: unpatched PLT emulation step 2 */
7239+ unsigned int save, call, nop;
7240+
7241+ err = get_user(save, (unsigned int *)(regs->tpc-4));
7242+ err |= get_user(call, (unsigned int *)regs->tpc);
7243+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7244+ if (err)
7245+ break;
7246+
7247+ if (save == 0x9DE3BFA8U &&
7248+ (call & 0xC0000000U) == 0x40000000U &&
7249+ nop == 0x01000000U)
7250+ {
7251+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7252+
7253+ if (test_thread_flag(TIF_32BIT))
7254+ dl_resolve &= 0xFFFFFFFFUL;
7255+
7256+ regs->u_regs[UREG_RETPC] = regs->tpc;
7257+ regs->tpc = dl_resolve;
7258+ regs->tnpc = dl_resolve+4;
7259+ return 3;
7260+ }
7261+ } while (0);
7262+#endif
7263+
7264+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7265+ unsigned int sethi, ba, nop;
7266+
7267+ err = get_user(sethi, (unsigned int *)regs->tpc);
7268+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7269+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7270+
7271+ if (err)
7272+ break;
7273+
7274+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7275+ (ba & 0xFFF00000U) == 0x30600000U &&
7276+ nop == 0x01000000U)
7277+ {
7278+ unsigned long addr;
7279+
7280+ addr = (sethi & 0x003FFFFFU) << 10;
7281+ regs->u_regs[UREG_G1] = addr;
7282+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7283+
7284+ if (test_thread_flag(TIF_32BIT))
7285+ addr &= 0xFFFFFFFFUL;
7286+
7287+ regs->tpc = addr;
7288+ regs->tnpc = addr+4;
7289+ return 2;
7290+ }
7291+ } while (0);
7292+
7293+#endif
7294+
7295+ return 1;
7296+}
7297+
7298+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7299+{
7300+ unsigned long i;
7301+
7302+ printk(KERN_ERR "PAX: bytes at PC: ");
7303+ for (i = 0; i < 8; i++) {
7304+ unsigned int c;
7305+ if (get_user(c, (unsigned int *)pc+i))
7306+ printk(KERN_CONT "???????? ");
7307+ else
7308+ printk(KERN_CONT "%08x ", c);
7309+ }
7310+ printk("\n");
7311+}
7312+#endif
7313+
7314 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7315 {
7316 struct mm_struct *mm = current->mm;
7317@@ -343,6 +806,29 @@ retry:
7318 if (!vma)
7319 goto bad_area;
7320
7321+#ifdef CONFIG_PAX_PAGEEXEC
7322+ /* PaX: detect ITLB misses on non-exec pages */
7323+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7324+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7325+ {
7326+ if (address != regs->tpc)
7327+ goto good_area;
7328+
7329+ up_read(&mm->mmap_sem);
7330+ switch (pax_handle_fetch_fault(regs)) {
7331+
7332+#ifdef CONFIG_PAX_EMUPLT
7333+ case 2:
7334+ case 3:
7335+ return;
7336+#endif
7337+
7338+ }
7339+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7340+ do_group_exit(SIGKILL);
7341+ }
7342+#endif
7343+
7344 /* Pure DTLB misses do not tell us whether the fault causing
7345 * load/store/atomic was a write or not, it only says that there
7346 * was no match. So in such a case we (carefully) read the
7347diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7348index 07e1453..0a7d9e9 100644
7349--- a/arch/sparc/mm/hugetlbpage.c
7350+++ b/arch/sparc/mm/hugetlbpage.c
7351@@ -67,7 +67,7 @@ full_search:
7352 }
7353 return -ENOMEM;
7354 }
7355- if (likely(!vma || addr + len <= vma->vm_start)) {
7356+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7357 /*
7358 * Remember the place where we stopped the search:
7359 */
7360@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7361 /* make sure it can fit in the remaining address space */
7362 if (likely(addr > len)) {
7363 vma = find_vma(mm, addr-len);
7364- if (!vma || addr <= vma->vm_start) {
7365+ if (check_heap_stack_gap(vma, addr - len, len)) {
7366 /* remember the address as a hint for next time */
7367 return (mm->free_area_cache = addr-len);
7368 }
7369@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7370 if (unlikely(mm->mmap_base < len))
7371 goto bottomup;
7372
7373- addr = (mm->mmap_base-len) & HPAGE_MASK;
7374+ addr = mm->mmap_base - len;
7375
7376 do {
7377+ addr &= HPAGE_MASK;
7378 /*
7379 * Lookup failure means no vma is above this address,
7380 * else if new region fits below vma->vm_start,
7381 * return with success:
7382 */
7383 vma = find_vma(mm, addr);
7384- if (likely(!vma || addr+len <= vma->vm_start)) {
7385+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7386 /* remember the address as a hint for next time */
7387 return (mm->free_area_cache = addr);
7388 }
7389@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7390 mm->cached_hole_size = vma->vm_start - addr;
7391
7392 /* try just below the current vma->vm_start */
7393- addr = (vma->vm_start-len) & HPAGE_MASK;
7394- } while (likely(len < vma->vm_start));
7395+ addr = skip_heap_stack_gap(vma, len);
7396+ } while (!IS_ERR_VALUE(addr));
7397
7398 bottomup:
7399 /*
7400@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7401 if (addr) {
7402 addr = ALIGN(addr, HPAGE_SIZE);
7403 vma = find_vma(mm, addr);
7404- if (task_size - len >= addr &&
7405- (!vma || addr + len <= vma->vm_start))
7406+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7407 return addr;
7408 }
7409 if (mm->get_unmapped_area == arch_get_unmapped_area)
7410diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7411index f4500c6..889656c 100644
7412--- a/arch/tile/include/asm/atomic_64.h
7413+++ b/arch/tile/include/asm/atomic_64.h
7414@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7415
7416 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7417
7418+#define atomic64_read_unchecked(v) atomic64_read(v)
7419+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7420+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7421+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7422+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7423+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7424+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7425+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7426+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7427+
7428 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7429 #define smp_mb__before_atomic_dec() smp_mb()
7430 #define smp_mb__after_atomic_dec() smp_mb()
7431diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7432index 392e533..536b092 100644
7433--- a/arch/tile/include/asm/cache.h
7434+++ b/arch/tile/include/asm/cache.h
7435@@ -15,11 +15,12 @@
7436 #ifndef _ASM_TILE_CACHE_H
7437 #define _ASM_TILE_CACHE_H
7438
7439+#include <linux/const.h>
7440 #include <arch/chip.h>
7441
7442 /* bytes per L1 data cache line */
7443 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7444-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7445+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7446
7447 /* bytes per L2 cache line */
7448 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
7449diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
7450index 9ab078a..d6635c2 100644
7451--- a/arch/tile/include/asm/uaccess.h
7452+++ b/arch/tile/include/asm/uaccess.h
7453@@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
7454 const void __user *from,
7455 unsigned long n)
7456 {
7457- int sz = __compiletime_object_size(to);
7458+ size_t sz = __compiletime_object_size(to);
7459
7460- if (likely(sz == -1 || sz >= n))
7461+ if (likely(sz == (size_t)-1 || sz >= n))
7462 n = _copy_from_user(to, from, n);
7463 else
7464 copy_from_user_overflow();
7465diff --git a/arch/um/Makefile b/arch/um/Makefile
7466index 0970910..13adb57a 100644
7467--- a/arch/um/Makefile
7468+++ b/arch/um/Makefile
7469@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7470 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7471 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7472
7473+ifdef CONSTIFY_PLUGIN
7474+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7475+endif
7476+
7477 #This will adjust *FLAGS accordingly to the platform.
7478 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7479
7480diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7481index 19e1bdd..3665b77 100644
7482--- a/arch/um/include/asm/cache.h
7483+++ b/arch/um/include/asm/cache.h
7484@@ -1,6 +1,7 @@
7485 #ifndef __UM_CACHE_H
7486 #define __UM_CACHE_H
7487
7488+#include <linux/const.h>
7489
7490 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7491 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7492@@ -12,6 +13,6 @@
7493 # define L1_CACHE_SHIFT 5
7494 #endif
7495
7496-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7497+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7498
7499 #endif
7500diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7501index 6c03acd..a5e0215 100644
7502--- a/arch/um/include/asm/kmap_types.h
7503+++ b/arch/um/include/asm/kmap_types.h
7504@@ -23,6 +23,7 @@ enum km_type {
7505 KM_IRQ1,
7506 KM_SOFTIRQ0,
7507 KM_SOFTIRQ1,
7508+ KM_CLEARPAGE,
7509 KM_TYPE_NR
7510 };
7511
7512diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7513index 7cfc3ce..cbd1a58 100644
7514--- a/arch/um/include/asm/page.h
7515+++ b/arch/um/include/asm/page.h
7516@@ -14,6 +14,9 @@
7517 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7518 #define PAGE_MASK (~(PAGE_SIZE-1))
7519
7520+#define ktla_ktva(addr) (addr)
7521+#define ktva_ktla(addr) (addr)
7522+
7523 #ifndef __ASSEMBLY__
7524
7525 struct page;
7526diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7527index 0032f92..cd151e0 100644
7528--- a/arch/um/include/asm/pgtable-3level.h
7529+++ b/arch/um/include/asm/pgtable-3level.h
7530@@ -58,6 +58,7 @@
7531 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7532 #define pud_populate(mm, pud, pmd) \
7533 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7534+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7535
7536 #ifdef CONFIG_64BIT
7537 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7538diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7539index ccb9a9d..cc425bb 100644
7540--- a/arch/um/kernel/process.c
7541+++ b/arch/um/kernel/process.c
7542@@ -407,22 +407,6 @@ int singlestepping(void * t)
7543 return 2;
7544 }
7545
7546-/*
7547- * Only x86 and x86_64 have an arch_align_stack().
7548- * All other arches have "#define arch_align_stack(x) (x)"
7549- * in their asm/system.h
7550- * As this is included in UML from asm-um/system-generic.h,
7551- * we can use it to behave as the subarch does.
7552- */
7553-#ifndef arch_align_stack
7554-unsigned long arch_align_stack(unsigned long sp)
7555-{
7556- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7557- sp -= get_random_int() % 8192;
7558- return sp & ~0xf;
7559-}
7560-#endif
7561-
7562 unsigned long get_wchan(struct task_struct *p)
7563 {
7564 unsigned long stack_page, sp, ip;
7565diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7566index ad8f795..2c7eec6 100644
7567--- a/arch/unicore32/include/asm/cache.h
7568+++ b/arch/unicore32/include/asm/cache.h
7569@@ -12,8 +12,10 @@
7570 #ifndef __UNICORE_CACHE_H__
7571 #define __UNICORE_CACHE_H__
7572
7573-#define L1_CACHE_SHIFT (5)
7574-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7575+#include <linux/const.h>
7576+
7577+#define L1_CACHE_SHIFT 5
7578+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7579
7580 /*
7581 * Memory returned by kmalloc() may be used for DMA, so we must make
7582diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7583index c70684f..698fa4b 100644
7584--- a/arch/x86/Kconfig
7585+++ b/arch/x86/Kconfig
7586@@ -218,7 +218,7 @@ config X86_HT
7587
7588 config X86_32_LAZY_GS
7589 def_bool y
7590- depends on X86_32 && !CC_STACKPROTECTOR
7591+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7592
7593 config ARCH_HWEIGHT_CFLAGS
7594 string
7595@@ -1047,7 +1047,7 @@ choice
7596
7597 config NOHIGHMEM
7598 bool "off"
7599- depends on !X86_NUMAQ
7600+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7601 ---help---
7602 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7603 However, the address space of 32-bit x86 processors is only 4
7604@@ -1084,7 +1084,7 @@ config NOHIGHMEM
7605
7606 config HIGHMEM4G
7607 bool "4GB"
7608- depends on !X86_NUMAQ
7609+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7610 ---help---
7611 Select this if you have a 32-bit processor and between 1 and 4
7612 gigabytes of physical RAM.
7613@@ -1138,7 +1138,7 @@ config PAGE_OFFSET
7614 hex
7615 default 0xB0000000 if VMSPLIT_3G_OPT
7616 default 0x80000000 if VMSPLIT_2G
7617- default 0x78000000 if VMSPLIT_2G_OPT
7618+ default 0x70000000 if VMSPLIT_2G_OPT
7619 default 0x40000000 if VMSPLIT_1G
7620 default 0xC0000000
7621 depends on X86_32
7622@@ -1526,6 +1526,7 @@ config SECCOMP
7623
7624 config CC_STACKPROTECTOR
7625 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7626+ depends on X86_64 || !PAX_MEMORY_UDEREF
7627 ---help---
7628 This option turns on the -fstack-protector GCC feature. This
7629 feature puts, at the beginning of functions, a canary value on
7630@@ -1583,6 +1584,7 @@ config KEXEC_JUMP
7631 config PHYSICAL_START
7632 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7633 default "0x1000000"
7634+ range 0x400000 0x40000000
7635 ---help---
7636 This gives the physical address where the kernel is loaded.
7637
7638@@ -1646,6 +1648,7 @@ config X86_NEED_RELOCS
7639 config PHYSICAL_ALIGN
7640 hex "Alignment value to which kernel should be aligned" if X86_32
7641 default "0x1000000"
7642+ range 0x400000 0x1000000 if PAX_KERNEXEC
7643 range 0x2000 0x1000000
7644 ---help---
7645 This value puts the alignment restrictions on physical address
7646@@ -1677,9 +1680,10 @@ config HOTPLUG_CPU
7647 Say N if you want to disable CPU hotplug.
7648
7649 config COMPAT_VDSO
7650- def_bool y
7651+ def_bool n
7652 prompt "Compat VDSO support"
7653 depends on X86_32 || IA32_EMULATION
7654+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7655 ---help---
7656 Map the 32-bit VDSO to the predictable old-style address too.
7657
7658diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7659index 706e12e..62e4feb 100644
7660--- a/arch/x86/Kconfig.cpu
7661+++ b/arch/x86/Kconfig.cpu
7662@@ -334,7 +334,7 @@ config X86_PPRO_FENCE
7663
7664 config X86_F00F_BUG
7665 def_bool y
7666- depends on M586MMX || M586TSC || M586 || M486 || M386
7667+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7668
7669 config X86_INVD_BUG
7670 def_bool y
7671@@ -358,7 +358,7 @@ config X86_POPAD_OK
7672
7673 config X86_ALIGNMENT_16
7674 def_bool y
7675- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7676+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7677
7678 config X86_INTEL_USERCOPY
7679 def_bool y
7680@@ -404,7 +404,7 @@ config X86_CMPXCHG64
7681 # generates cmov.
7682 config X86_CMOV
7683 def_bool y
7684- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7685+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7686
7687 config X86_MINIMUM_CPU_FAMILY
7688 int
7689diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7690index e46c214..ab62fd1 100644
7691--- a/arch/x86/Kconfig.debug
7692+++ b/arch/x86/Kconfig.debug
7693@@ -84,7 +84,7 @@ config X86_PTDUMP
7694 config DEBUG_RODATA
7695 bool "Write protect kernel read-only data structures"
7696 default y
7697- depends on DEBUG_KERNEL
7698+ depends on DEBUG_KERNEL && BROKEN
7699 ---help---
7700 Mark the kernel read-only data as write-protected in the pagetables,
7701 in order to catch accidental (and incorrect) writes to such const
7702@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7703
7704 config DEBUG_SET_MODULE_RONX
7705 bool "Set loadable kernel module data as NX and text as RO"
7706- depends on MODULES
7707+ depends on MODULES && BROKEN
7708 ---help---
7709 This option helps catch unintended modifications to loadable
7710 kernel module's text and read-only data. It also prevents execution
7711@@ -275,7 +275,7 @@ config OPTIMIZE_INLINING
7712
7713 config DEBUG_STRICT_USER_COPY_CHECKS
7714 bool "Strict copy size checks"
7715- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
7716+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
7717 ---help---
7718 Enabling this option turns a certain set of sanity checks for user
7719 copy operations into compile time failures.
7720diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7721index 1f25214..39422b3 100644
7722--- a/arch/x86/Makefile
7723+++ b/arch/x86/Makefile
7724@@ -46,6 +46,7 @@ else
7725 UTS_MACHINE := x86_64
7726 CHECKFLAGS += -D__x86_64__ -m64
7727
7728+ biarch := $(call cc-option,-m64)
7729 KBUILD_AFLAGS += -m64
7730 KBUILD_CFLAGS += -m64
7731
7732@@ -222,3 +223,12 @@ define archhelp
7733 echo ' FDARGS="..." arguments for the booted kernel'
7734 echo ' FDINITRD=file initrd for the booted kernel'
7735 endef
7736+
7737+define OLD_LD
7738+
7739+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7740+*** Please upgrade your binutils to 2.18 or newer
7741+endef
7742+
7743+archprepare:
7744+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7745diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7746index 5a747dd..ff7b12c 100644
7747--- a/arch/x86/boot/Makefile
7748+++ b/arch/x86/boot/Makefile
7749@@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7750 $(call cc-option, -fno-stack-protector) \
7751 $(call cc-option, -mpreferred-stack-boundary=2)
7752 KBUILD_CFLAGS += $(call cc-option, -m32)
7753+ifdef CONSTIFY_PLUGIN
7754+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7755+endif
7756 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7757 GCOV_PROFILE := n
7758
7759diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7760index 878e4b9..20537ab 100644
7761--- a/arch/x86/boot/bitops.h
7762+++ b/arch/x86/boot/bitops.h
7763@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7764 u8 v;
7765 const u32 *p = (const u32 *)addr;
7766
7767- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7768+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7769 return v;
7770 }
7771
7772@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7773
7774 static inline void set_bit(int nr, void *addr)
7775 {
7776- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7777+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7778 }
7779
7780 #endif /* BOOT_BITOPS_H */
7781diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7782index 18997e5..83d9c67 100644
7783--- a/arch/x86/boot/boot.h
7784+++ b/arch/x86/boot/boot.h
7785@@ -85,7 +85,7 @@ static inline void io_delay(void)
7786 static inline u16 ds(void)
7787 {
7788 u16 seg;
7789- asm("movw %%ds,%0" : "=rm" (seg));
7790+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7791 return seg;
7792 }
7793
7794@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7795 static inline int memcmp(const void *s1, const void *s2, size_t len)
7796 {
7797 u8 diff;
7798- asm("repe; cmpsb; setnz %0"
7799+ asm volatile("repe; cmpsb; setnz %0"
7800 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7801 return diff;
7802 }
7803diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7804index e398bb5..3a382ca 100644
7805--- a/arch/x86/boot/compressed/Makefile
7806+++ b/arch/x86/boot/compressed/Makefile
7807@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7808 KBUILD_CFLAGS += $(cflags-y)
7809 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7810 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7811+ifdef CONSTIFY_PLUGIN
7812+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7813+endif
7814
7815 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7816 GCOV_PROFILE := n
7817diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
7818index 4e85f5f..39fa641 100644
7819--- a/arch/x86/boot/compressed/eboot.c
7820+++ b/arch/x86/boot/compressed/eboot.c
7821@@ -142,7 +142,6 @@ again:
7822 *addr = max_addr;
7823 }
7824
7825-free_pool:
7826 efi_call_phys1(sys_table->boottime->free_pool, map);
7827
7828 fail:
7829@@ -206,7 +205,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
7830 if (i == map_size / desc_size)
7831 status = EFI_NOT_FOUND;
7832
7833-free_pool:
7834 efi_call_phys1(sys_table->boottime->free_pool, map);
7835 fail:
7836 return status;
7837diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7838index c85e3ac..6f5aa80 100644
7839--- a/arch/x86/boot/compressed/head_32.S
7840+++ b/arch/x86/boot/compressed/head_32.S
7841@@ -106,7 +106,7 @@ preferred_addr:
7842 notl %eax
7843 andl %eax, %ebx
7844 #else
7845- movl $LOAD_PHYSICAL_ADDR, %ebx
7846+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7847 #endif
7848
7849 /* Target address to relocate to for decompression */
7850@@ -192,7 +192,7 @@ relocated:
7851 * and where it was actually loaded.
7852 */
7853 movl %ebp, %ebx
7854- subl $LOAD_PHYSICAL_ADDR, %ebx
7855+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7856 jz 2f /* Nothing to be done if loaded at compiled addr. */
7857 /*
7858 * Process relocations.
7859@@ -200,8 +200,7 @@ relocated:
7860
7861 1: subl $4, %edi
7862 movl (%edi), %ecx
7863- testl %ecx, %ecx
7864- jz 2f
7865+ jecxz 2f
7866 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7867 jmp 1b
7868 2:
7869diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7870index 87e03a1..0d94c76 100644
7871--- a/arch/x86/boot/compressed/head_64.S
7872+++ b/arch/x86/boot/compressed/head_64.S
7873@@ -91,7 +91,7 @@ ENTRY(startup_32)
7874 notl %eax
7875 andl %eax, %ebx
7876 #else
7877- movl $LOAD_PHYSICAL_ADDR, %ebx
7878+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7879 #endif
7880
7881 /* Target address to relocate to for decompression */
7882@@ -263,7 +263,7 @@ preferred_addr:
7883 notq %rax
7884 andq %rax, %rbp
7885 #else
7886- movq $LOAD_PHYSICAL_ADDR, %rbp
7887+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7888 #endif
7889
7890 /* Target address to relocate to for decompression */
7891diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7892index 7116dcb..d9ae1d7 100644
7893--- a/arch/x86/boot/compressed/misc.c
7894+++ b/arch/x86/boot/compressed/misc.c
7895@@ -310,7 +310,7 @@ static void parse_elf(void *output)
7896 case PT_LOAD:
7897 #ifdef CONFIG_RELOCATABLE
7898 dest = output;
7899- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7900+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7901 #else
7902 dest = (void *)(phdr->p_paddr);
7903 #endif
7904@@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7905 error("Destination address too large");
7906 #endif
7907 #ifndef CONFIG_RELOCATABLE
7908- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7909+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7910 error("Wrong destination address");
7911 #endif
7912
7913diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7914index 4d3ff03..e4972ff 100644
7915--- a/arch/x86/boot/cpucheck.c
7916+++ b/arch/x86/boot/cpucheck.c
7917@@ -74,7 +74,7 @@ static int has_fpu(void)
7918 u16 fcw = -1, fsw = -1;
7919 u32 cr0;
7920
7921- asm("movl %%cr0,%0" : "=r" (cr0));
7922+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7923 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7924 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7925 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7926@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7927 {
7928 u32 f0, f1;
7929
7930- asm("pushfl ; "
7931+ asm volatile("pushfl ; "
7932 "pushfl ; "
7933 "popl %0 ; "
7934 "movl %0,%1 ; "
7935@@ -115,7 +115,7 @@ static void get_flags(void)
7936 set_bit(X86_FEATURE_FPU, cpu.flags);
7937
7938 if (has_eflag(X86_EFLAGS_ID)) {
7939- asm("cpuid"
7940+ asm volatile("cpuid"
7941 : "=a" (max_intel_level),
7942 "=b" (cpu_vendor[0]),
7943 "=d" (cpu_vendor[1]),
7944@@ -124,7 +124,7 @@ static void get_flags(void)
7945
7946 if (max_intel_level >= 0x00000001 &&
7947 max_intel_level <= 0x0000ffff) {
7948- asm("cpuid"
7949+ asm volatile("cpuid"
7950 : "=a" (tfms),
7951 "=c" (cpu.flags[4]),
7952 "=d" (cpu.flags[0])
7953@@ -136,7 +136,7 @@ static void get_flags(void)
7954 cpu.model += ((tfms >> 16) & 0xf) << 4;
7955 }
7956
7957- asm("cpuid"
7958+ asm volatile("cpuid"
7959 : "=a" (max_amd_level)
7960 : "a" (0x80000000)
7961 : "ebx", "ecx", "edx");
7962@@ -144,7 +144,7 @@ static void get_flags(void)
7963 if (max_amd_level >= 0x80000001 &&
7964 max_amd_level <= 0x8000ffff) {
7965 u32 eax = 0x80000001;
7966- asm("cpuid"
7967+ asm volatile("cpuid"
7968 : "+a" (eax),
7969 "=c" (cpu.flags[6]),
7970 "=d" (cpu.flags[1])
7971@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7972 u32 ecx = MSR_K7_HWCR;
7973 u32 eax, edx;
7974
7975- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7976+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7977 eax &= ~(1 << 15);
7978- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7979+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7980
7981 get_flags(); /* Make sure it really did something */
7982 err = check_flags();
7983@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7984 u32 ecx = MSR_VIA_FCR;
7985 u32 eax, edx;
7986
7987- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7988+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7989 eax |= (1<<1)|(1<<7);
7990- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7991+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7992
7993 set_bit(X86_FEATURE_CX8, cpu.flags);
7994 err = check_flags();
7995@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7996 u32 eax, edx;
7997 u32 level = 1;
7998
7999- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8000- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8001- asm("cpuid"
8002+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8003+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8004+ asm volatile("cpuid"
8005 : "+a" (level), "=d" (cpu.flags[0])
8006 : : "ecx", "ebx");
8007- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8008+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8009
8010 err = check_flags();
8011 }
8012diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8013index efe5acf..22a3784 100644
8014--- a/arch/x86/boot/header.S
8015+++ b/arch/x86/boot/header.S
8016@@ -391,10 +391,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
8017 # single linked list of
8018 # struct setup_data
8019
8020-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8021+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8022
8023 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8024+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
8025+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
8026+#else
8027 #define VO_INIT_SIZE (VO__end - VO__text)
8028+#endif
8029 #if ZO_INIT_SIZE > VO_INIT_SIZE
8030 #define INIT_SIZE ZO_INIT_SIZE
8031 #else
8032diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8033index db75d07..8e6d0af 100644
8034--- a/arch/x86/boot/memory.c
8035+++ b/arch/x86/boot/memory.c
8036@@ -19,7 +19,7 @@
8037
8038 static int detect_memory_e820(void)
8039 {
8040- int count = 0;
8041+ unsigned int count = 0;
8042 struct biosregs ireg, oreg;
8043 struct e820entry *desc = boot_params.e820_map;
8044 static struct e820entry buf; /* static so it is zeroed */
8045diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8046index 11e8c6e..fdbb1ed 100644
8047--- a/arch/x86/boot/video-vesa.c
8048+++ b/arch/x86/boot/video-vesa.c
8049@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8050
8051 boot_params.screen_info.vesapm_seg = oreg.es;
8052 boot_params.screen_info.vesapm_off = oreg.di;
8053+ boot_params.screen_info.vesapm_size = oreg.cx;
8054 }
8055
8056 /*
8057diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8058index 43eda28..5ab5fdb 100644
8059--- a/arch/x86/boot/video.c
8060+++ b/arch/x86/boot/video.c
8061@@ -96,7 +96,7 @@ static void store_mode_params(void)
8062 static unsigned int get_entry(void)
8063 {
8064 char entry_buf[4];
8065- int i, len = 0;
8066+ unsigned int i, len = 0;
8067 int key;
8068 unsigned int v;
8069
8070diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8071index 5b577d5..3c1fed4 100644
8072--- a/arch/x86/crypto/aes-x86_64-asm_64.S
8073+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8074@@ -8,6 +8,8 @@
8075 * including this sentence is retained in full.
8076 */
8077
8078+#include <asm/alternative-asm.h>
8079+
8080 .extern crypto_ft_tab
8081 .extern crypto_it_tab
8082 .extern crypto_fl_tab
8083@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8084 je B192; \
8085 leaq 32(r9),r9;
8086
8087+#define ret pax_force_retaddr 0, 1; ret
8088+
8089 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8090 movq r1,r2; \
8091 movq r3,r4; \
8092diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8093index 3470624..201259d 100644
8094--- a/arch/x86/crypto/aesni-intel_asm.S
8095+++ b/arch/x86/crypto/aesni-intel_asm.S
8096@@ -31,6 +31,7 @@
8097
8098 #include <linux/linkage.h>
8099 #include <asm/inst.h>
8100+#include <asm/alternative-asm.h>
8101
8102 #ifdef __x86_64__
8103 .data
8104@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8105 pop %r14
8106 pop %r13
8107 pop %r12
8108+ pax_force_retaddr 0, 1
8109 ret
8110+ENDPROC(aesni_gcm_dec)
8111
8112
8113 /*****************************************************************************
8114@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8115 pop %r14
8116 pop %r13
8117 pop %r12
8118+ pax_force_retaddr 0, 1
8119 ret
8120+ENDPROC(aesni_gcm_enc)
8121
8122 #endif
8123
8124@@ -1714,6 +1719,7 @@ _key_expansion_256a:
8125 pxor %xmm1, %xmm0
8126 movaps %xmm0, (TKEYP)
8127 add $0x10, TKEYP
8128+ pax_force_retaddr_bts
8129 ret
8130
8131 .align 4
8132@@ -1738,6 +1744,7 @@ _key_expansion_192a:
8133 shufps $0b01001110, %xmm2, %xmm1
8134 movaps %xmm1, 0x10(TKEYP)
8135 add $0x20, TKEYP
8136+ pax_force_retaddr_bts
8137 ret
8138
8139 .align 4
8140@@ -1757,6 +1764,7 @@ _key_expansion_192b:
8141
8142 movaps %xmm0, (TKEYP)
8143 add $0x10, TKEYP
8144+ pax_force_retaddr_bts
8145 ret
8146
8147 .align 4
8148@@ -1769,6 +1777,7 @@ _key_expansion_256b:
8149 pxor %xmm1, %xmm2
8150 movaps %xmm2, (TKEYP)
8151 add $0x10, TKEYP
8152+ pax_force_retaddr_bts
8153 ret
8154
8155 /*
8156@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8157 #ifndef __x86_64__
8158 popl KEYP
8159 #endif
8160+ pax_force_retaddr 0, 1
8161 ret
8162+ENDPROC(aesni_set_key)
8163
8164 /*
8165 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8166@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8167 popl KLEN
8168 popl KEYP
8169 #endif
8170+ pax_force_retaddr 0, 1
8171 ret
8172+ENDPROC(aesni_enc)
8173
8174 /*
8175 * _aesni_enc1: internal ABI
8176@@ -1959,6 +1972,7 @@ _aesni_enc1:
8177 AESENC KEY STATE
8178 movaps 0x70(TKEYP), KEY
8179 AESENCLAST KEY STATE
8180+ pax_force_retaddr_bts
8181 ret
8182
8183 /*
8184@@ -2067,6 +2081,7 @@ _aesni_enc4:
8185 AESENCLAST KEY STATE2
8186 AESENCLAST KEY STATE3
8187 AESENCLAST KEY STATE4
8188+ pax_force_retaddr_bts
8189 ret
8190
8191 /*
8192@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8193 popl KLEN
8194 popl KEYP
8195 #endif
8196+ pax_force_retaddr 0, 1
8197 ret
8198+ENDPROC(aesni_dec)
8199
8200 /*
8201 * _aesni_dec1: internal ABI
8202@@ -2146,6 +2163,7 @@ _aesni_dec1:
8203 AESDEC KEY STATE
8204 movaps 0x70(TKEYP), KEY
8205 AESDECLAST KEY STATE
8206+ pax_force_retaddr_bts
8207 ret
8208
8209 /*
8210@@ -2254,6 +2272,7 @@ _aesni_dec4:
8211 AESDECLAST KEY STATE2
8212 AESDECLAST KEY STATE3
8213 AESDECLAST KEY STATE4
8214+ pax_force_retaddr_bts
8215 ret
8216
8217 /*
8218@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8219 popl KEYP
8220 popl LEN
8221 #endif
8222+ pax_force_retaddr 0, 1
8223 ret
8224+ENDPROC(aesni_ecb_enc)
8225
8226 /*
8227 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8228@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8229 popl KEYP
8230 popl LEN
8231 #endif
8232+ pax_force_retaddr 0, 1
8233 ret
8234+ENDPROC(aesni_ecb_dec)
8235
8236 /*
8237 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8238@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8239 popl LEN
8240 popl IVP
8241 #endif
8242+ pax_force_retaddr 0, 1
8243 ret
8244+ENDPROC(aesni_cbc_enc)
8245
8246 /*
8247 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8248@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
8249 popl LEN
8250 popl IVP
8251 #endif
8252+ pax_force_retaddr 0, 1
8253 ret
8254+ENDPROC(aesni_cbc_dec)
8255
8256 #ifdef __x86_64__
8257 .align 16
8258@@ -2526,6 +2553,7 @@ _aesni_inc_init:
8259 mov $1, TCTR_LOW
8260 MOVQ_R64_XMM TCTR_LOW INC
8261 MOVQ_R64_XMM CTR TCTR_LOW
8262+ pax_force_retaddr_bts
8263 ret
8264
8265 /*
8266@@ -2554,6 +2582,7 @@ _aesni_inc:
8267 .Linc_low:
8268 movaps CTR, IV
8269 PSHUFB_XMM BSWAP_MASK IV
8270+ pax_force_retaddr_bts
8271 ret
8272
8273 /*
8274@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
8275 .Lctr_enc_ret:
8276 movups IV, (IVP)
8277 .Lctr_enc_just_ret:
8278+ pax_force_retaddr 0, 1
8279 ret
8280+ENDPROC(aesni_ctr_enc)
8281 #endif
8282diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8283index 391d245..67f35c2 100644
8284--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8285+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8286@@ -20,6 +20,8 @@
8287 *
8288 */
8289
8290+#include <asm/alternative-asm.h>
8291+
8292 .file "blowfish-x86_64-asm.S"
8293 .text
8294
8295@@ -151,9 +153,11 @@ __blowfish_enc_blk:
8296 jnz __enc_xor;
8297
8298 write_block();
8299+ pax_force_retaddr 0, 1
8300 ret;
8301 __enc_xor:
8302 xor_block();
8303+ pax_force_retaddr 0, 1
8304 ret;
8305
8306 .align 8
8307@@ -188,6 +192,7 @@ blowfish_dec_blk:
8308
8309 movq %r11, %rbp;
8310
8311+ pax_force_retaddr 0, 1
8312 ret;
8313
8314 /**********************************************************************
8315@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8316
8317 popq %rbx;
8318 popq %rbp;
8319+ pax_force_retaddr 0, 1
8320 ret;
8321
8322 __enc_xor4:
8323@@ -349,6 +355,7 @@ __enc_xor4:
8324
8325 popq %rbx;
8326 popq %rbp;
8327+ pax_force_retaddr 0, 1
8328 ret;
8329
8330 .align 8
8331@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8332 popq %rbx;
8333 popq %rbp;
8334
8335+ pax_force_retaddr 0, 1
8336 ret;
8337
8338diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
8339index 0b33743..7a56206 100644
8340--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
8341+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
8342@@ -20,6 +20,8 @@
8343 *
8344 */
8345
8346+#include <asm/alternative-asm.h>
8347+
8348 .file "camellia-x86_64-asm_64.S"
8349 .text
8350
8351@@ -229,12 +231,14 @@ __enc_done:
8352 enc_outunpack(mov, RT1);
8353
8354 movq RRBP, %rbp;
8355+ pax_force_retaddr 0, 1
8356 ret;
8357
8358 __enc_xor:
8359 enc_outunpack(xor, RT1);
8360
8361 movq RRBP, %rbp;
8362+ pax_force_retaddr 0, 1
8363 ret;
8364
8365 .global camellia_dec_blk;
8366@@ -275,6 +279,7 @@ __dec_rounds16:
8367 dec_outunpack();
8368
8369 movq RRBP, %rbp;
8370+ pax_force_retaddr 0, 1
8371 ret;
8372
8373 /**********************************************************************
8374@@ -468,6 +473,7 @@ __enc2_done:
8375
8376 movq RRBP, %rbp;
8377 popq %rbx;
8378+ pax_force_retaddr 0, 1
8379 ret;
8380
8381 __enc2_xor:
8382@@ -475,6 +481,7 @@ __enc2_xor:
8383
8384 movq RRBP, %rbp;
8385 popq %rbx;
8386+ pax_force_retaddr 0, 1
8387 ret;
8388
8389 .global camellia_dec_blk_2way;
8390@@ -517,4 +524,5 @@ __dec2_rounds16:
8391
8392 movq RRBP, %rbp;
8393 movq RXOR, %rbx;
8394+ pax_force_retaddr 0, 1
8395 ret;
8396diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8397index 6214a9b..1f4fc9a 100644
8398--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8399+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8400@@ -1,3 +1,5 @@
8401+#include <asm/alternative-asm.h>
8402+
8403 # enter ECRYPT_encrypt_bytes
8404 .text
8405 .p2align 5
8406@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8407 add %r11,%rsp
8408 mov %rdi,%rax
8409 mov %rsi,%rdx
8410+ pax_force_retaddr 0, 1
8411 ret
8412 # bytesatleast65:
8413 ._bytesatleast65:
8414@@ -891,6 +894,7 @@ ECRYPT_keysetup:
8415 add %r11,%rsp
8416 mov %rdi,%rax
8417 mov %rsi,%rdx
8418+ pax_force_retaddr
8419 ret
8420 # enter ECRYPT_ivsetup
8421 .text
8422@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8423 add %r11,%rsp
8424 mov %rdi,%rax
8425 mov %rsi,%rdx
8426+ pax_force_retaddr
8427 ret
8428diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8429index 3ee1ff0..cbc568b 100644
8430--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8431+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8432@@ -24,6 +24,8 @@
8433 *
8434 */
8435
8436+#include <asm/alternative-asm.h>
8437+
8438 .file "serpent-sse2-x86_64-asm_64.S"
8439 .text
8440
8441@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
8442 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8443 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8444
8445+ pax_force_retaddr
8446 ret;
8447
8448 __enc_xor8:
8449 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8450 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8451
8452+ pax_force_retaddr
8453 ret;
8454
8455 .align 8
8456@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
8457 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8458 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8459
8460+ pax_force_retaddr
8461 ret;
8462diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8463index b2c2f57..8470cab 100644
8464--- a/arch/x86/crypto/sha1_ssse3_asm.S
8465+++ b/arch/x86/crypto/sha1_ssse3_asm.S
8466@@ -28,6 +28,8 @@
8467 * (at your option) any later version.
8468 */
8469
8470+#include <asm/alternative-asm.h>
8471+
8472 #define CTX %rdi // arg1
8473 #define BUF %rsi // arg2
8474 #define CNT %rdx // arg3
8475@@ -104,6 +106,7 @@
8476 pop %r12
8477 pop %rbp
8478 pop %rbx
8479+ pax_force_retaddr 0, 1
8480 ret
8481
8482 .size \name, .-\name
8483diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8484index 5b012a2..36d5364 100644
8485--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8486+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8487@@ -20,6 +20,8 @@
8488 *
8489 */
8490
8491+#include <asm/alternative-asm.h>
8492+
8493 .file "twofish-x86_64-asm-3way.S"
8494 .text
8495
8496@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8497 popq %r13;
8498 popq %r14;
8499 popq %r15;
8500+ pax_force_retaddr 0, 1
8501 ret;
8502
8503 __enc_xor3:
8504@@ -271,6 +274,7 @@ __enc_xor3:
8505 popq %r13;
8506 popq %r14;
8507 popq %r15;
8508+ pax_force_retaddr 0, 1
8509 ret;
8510
8511 .global twofish_dec_blk_3way
8512@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8513 popq %r13;
8514 popq %r14;
8515 popq %r15;
8516+ pax_force_retaddr 0, 1
8517 ret;
8518
8519diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8520index 7bcf3fc..f53832f 100644
8521--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8522+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8523@@ -21,6 +21,7 @@
8524 .text
8525
8526 #include <asm/asm-offsets.h>
8527+#include <asm/alternative-asm.h>
8528
8529 #define a_offset 0
8530 #define b_offset 4
8531@@ -268,6 +269,7 @@ twofish_enc_blk:
8532
8533 popq R1
8534 movq $1,%rax
8535+ pax_force_retaddr 0, 1
8536 ret
8537
8538 twofish_dec_blk:
8539@@ -319,4 +321,5 @@ twofish_dec_blk:
8540
8541 popq R1
8542 movq $1,%rax
8543+ pax_force_retaddr 0, 1
8544 ret
8545diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8546index 07b3a68..bd2a388 100644
8547--- a/arch/x86/ia32/ia32_aout.c
8548+++ b/arch/x86/ia32/ia32_aout.c
8549@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8550 unsigned long dump_start, dump_size;
8551 struct user32 dump;
8552
8553+ memset(&dump, 0, sizeof(dump));
8554+
8555 fs = get_fs();
8556 set_fs(KERNEL_DS);
8557 has_dumped = 1;
8558diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8559index 673ac9b..7a8c5df 100644
8560--- a/arch/x86/ia32/ia32_signal.c
8561+++ b/arch/x86/ia32/ia32_signal.c
8562@@ -162,7 +162,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8563 }
8564 seg = get_fs();
8565 set_fs(KERNEL_DS);
8566- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8567+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8568 set_fs(seg);
8569 if (ret >= 0 && uoss_ptr) {
8570 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8571@@ -361,7 +361,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8572 */
8573 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8574 size_t frame_size,
8575- void **fpstate)
8576+ void __user **fpstate)
8577 {
8578 unsigned long sp;
8579
8580@@ -382,7 +382,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8581
8582 if (used_math()) {
8583 sp = sp - sig_xstate_ia32_size;
8584- *fpstate = (struct _fpstate_ia32 *) sp;
8585+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8586 if (save_i387_xstate_ia32(*fpstate) < 0)
8587 return (void __user *) -1L;
8588 }
8589@@ -390,7 +390,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8590 sp -= frame_size;
8591 /* Align the stack pointer according to the i386 ABI,
8592 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8593- sp = ((sp + 4) & -16ul) - 4;
8594+ sp = ((sp - 12) & -16ul) - 4;
8595 return (void __user *) sp;
8596 }
8597
8598@@ -448,7 +448,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8599 * These are actually not used anymore, but left because some
8600 * gdb versions depend on them as a marker.
8601 */
8602- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8603+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8604 } put_user_catch(err);
8605
8606 if (err)
8607@@ -490,7 +490,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8608 0xb8,
8609 __NR_ia32_rt_sigreturn,
8610 0x80cd,
8611- 0,
8612+ 0
8613 };
8614
8615 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8616@@ -520,16 +520,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8617
8618 if (ka->sa.sa_flags & SA_RESTORER)
8619 restorer = ka->sa.sa_restorer;
8620+ else if (current->mm->context.vdso)
8621+ /* Return stub is in 32bit vsyscall page */
8622+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8623 else
8624- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8625- rt_sigreturn);
8626+ restorer = &frame->retcode;
8627 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8628
8629 /*
8630 * Not actually used anymore, but left because some gdb
8631 * versions need it.
8632 */
8633- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8634+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8635 } put_user_catch(err);
8636
8637 if (err)
8638diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8639index 20e5f7b..f33c779 100644
8640--- a/arch/x86/ia32/ia32entry.S
8641+++ b/arch/x86/ia32/ia32entry.S
8642@@ -14,8 +14,10 @@
8643 #include <asm/segment.h>
8644 #include <asm/irqflags.h>
8645 #include <asm/asm.h>
8646+#include <asm/pgtable.h>
8647 #include <linux/linkage.h>
8648 #include <linux/err.h>
8649+#include <asm/alternative-asm.h>
8650
8651 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8652 #include <linux/elf-em.h>
8653@@ -95,6 +97,32 @@ ENTRY(native_irq_enable_sysexit)
8654 ENDPROC(native_irq_enable_sysexit)
8655 #endif
8656
8657+ .macro pax_enter_kernel_user
8658+ pax_set_fptr_mask
8659+#ifdef CONFIG_PAX_MEMORY_UDEREF
8660+ call pax_enter_kernel_user
8661+#endif
8662+ .endm
8663+
8664+ .macro pax_exit_kernel_user
8665+#ifdef CONFIG_PAX_MEMORY_UDEREF
8666+ call pax_exit_kernel_user
8667+#endif
8668+#ifdef CONFIG_PAX_RANDKSTACK
8669+ pushq %rax
8670+ pushq %r11
8671+ call pax_randomize_kstack
8672+ popq %r11
8673+ popq %rax
8674+#endif
8675+ .endm
8676+
8677+.macro pax_erase_kstack
8678+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8679+ call pax_erase_kstack
8680+#endif
8681+.endm
8682+
8683 /*
8684 * 32bit SYSENTER instruction entry.
8685 *
8686@@ -121,12 +149,6 @@ ENTRY(ia32_sysenter_target)
8687 CFI_REGISTER rsp,rbp
8688 SWAPGS_UNSAFE_STACK
8689 movq PER_CPU_VAR(kernel_stack), %rsp
8690- addq $(KERNEL_STACK_OFFSET),%rsp
8691- /*
8692- * No need to follow this irqs on/off section: the syscall
8693- * disabled irqs, here we enable it straight after entry:
8694- */
8695- ENABLE_INTERRUPTS(CLBR_NONE)
8696 movl %ebp,%ebp /* zero extension */
8697 pushq_cfi $__USER32_DS
8698 /*CFI_REL_OFFSET ss,0*/
8699@@ -134,22 +156,42 @@ ENTRY(ia32_sysenter_target)
8700 CFI_REL_OFFSET rsp,0
8701 pushfq_cfi
8702 /*CFI_REL_OFFSET rflags,0*/
8703- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8704- CFI_REGISTER rip,r10
8705+ orl $X86_EFLAGS_IF,(%rsp)
8706+ GET_THREAD_INFO(%r11)
8707+ movl TI_sysenter_return(%r11), %r11d
8708+ CFI_REGISTER rip,r11
8709 pushq_cfi $__USER32_CS
8710 /*CFI_REL_OFFSET cs,0*/
8711 movl %eax, %eax
8712- pushq_cfi %r10
8713+ pushq_cfi %r11
8714 CFI_REL_OFFSET rip,0
8715 pushq_cfi %rax
8716 cld
8717 SAVE_ARGS 0,1,0
8718+ pax_enter_kernel_user
8719+
8720+#ifdef CONFIG_PAX_RANDKSTACK
8721+ pax_erase_kstack
8722+#endif
8723+
8724+ /*
8725+ * No need to follow this irqs on/off section: the syscall
8726+ * disabled irqs, here we enable it straight after entry:
8727+ */
8728+ ENABLE_INTERRUPTS(CLBR_NONE)
8729 /* no need to do an access_ok check here because rbp has been
8730 32bit zero extended */
8731+
8732+#ifdef CONFIG_PAX_MEMORY_UDEREF
8733+ mov $PAX_USER_SHADOW_BASE,%r11
8734+ add %r11,%rbp
8735+#endif
8736+
8737 1: movl (%rbp),%ebp
8738 _ASM_EXTABLE(1b,ia32_badarg)
8739- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8740- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8741+ GET_THREAD_INFO(%r11)
8742+ orl $TS_COMPAT,TI_status(%r11)
8743+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8744 CFI_REMEMBER_STATE
8745 jnz sysenter_tracesys
8746 cmpq $(IA32_NR_syscalls-1),%rax
8747@@ -159,12 +201,15 @@ sysenter_do_call:
8748 sysenter_dispatch:
8749 call *ia32_sys_call_table(,%rax,8)
8750 movq %rax,RAX-ARGOFFSET(%rsp)
8751+ GET_THREAD_INFO(%r11)
8752 DISABLE_INTERRUPTS(CLBR_NONE)
8753 TRACE_IRQS_OFF
8754- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8755+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8756 jnz sysexit_audit
8757 sysexit_from_sys_call:
8758- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8759+ pax_exit_kernel_user
8760+ pax_erase_kstack
8761+ andl $~TS_COMPAT,TI_status(%r11)
8762 /* clear IF, that popfq doesn't enable interrupts early */
8763 andl $~0x200,EFLAGS-R11(%rsp)
8764 movl RIP-R11(%rsp),%edx /* User %eip */
8765@@ -190,6 +235,9 @@ sysexit_from_sys_call:
8766 movl %eax,%esi /* 2nd arg: syscall number */
8767 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8768 call __audit_syscall_entry
8769+
8770+ pax_erase_kstack
8771+
8772 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8773 cmpq $(IA32_NR_syscalls-1),%rax
8774 ja ia32_badsys
8775@@ -201,7 +249,7 @@ sysexit_from_sys_call:
8776 .endm
8777
8778 .macro auditsys_exit exit
8779- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8780+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8781 jnz ia32_ret_from_sys_call
8782 TRACE_IRQS_ON
8783 sti
8784@@ -212,11 +260,12 @@ sysexit_from_sys_call:
8785 1: setbe %al /* 1 if error, 0 if not */
8786 movzbl %al,%edi /* zero-extend that into %edi */
8787 call __audit_syscall_exit
8788+ GET_THREAD_INFO(%r11)
8789 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8790 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8791 cli
8792 TRACE_IRQS_OFF
8793- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8794+ testl %edi,TI_flags(%r11)
8795 jz \exit
8796 CLEAR_RREGS -ARGOFFSET
8797 jmp int_with_check
8798@@ -234,7 +283,7 @@ sysexit_audit:
8799
8800 sysenter_tracesys:
8801 #ifdef CONFIG_AUDITSYSCALL
8802- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8803+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8804 jz sysenter_auditsys
8805 #endif
8806 SAVE_REST
8807@@ -246,6 +295,9 @@ sysenter_tracesys:
8808 RESTORE_REST
8809 cmpq $(IA32_NR_syscalls-1),%rax
8810 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
8811+
8812+ pax_erase_kstack
8813+
8814 jmp sysenter_do_call
8815 CFI_ENDPROC
8816 ENDPROC(ia32_sysenter_target)
8817@@ -273,19 +325,25 @@ ENDPROC(ia32_sysenter_target)
8818 ENTRY(ia32_cstar_target)
8819 CFI_STARTPROC32 simple
8820 CFI_SIGNAL_FRAME
8821- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8822+ CFI_DEF_CFA rsp,0
8823 CFI_REGISTER rip,rcx
8824 /*CFI_REGISTER rflags,r11*/
8825 SWAPGS_UNSAFE_STACK
8826 movl %esp,%r8d
8827 CFI_REGISTER rsp,r8
8828 movq PER_CPU_VAR(kernel_stack),%rsp
8829+ SAVE_ARGS 8*6,0,0
8830+ pax_enter_kernel_user
8831+
8832+#ifdef CONFIG_PAX_RANDKSTACK
8833+ pax_erase_kstack
8834+#endif
8835+
8836 /*
8837 * No need to follow this irqs on/off section: the syscall
8838 * disabled irqs and here we enable it straight after entry:
8839 */
8840 ENABLE_INTERRUPTS(CLBR_NONE)
8841- SAVE_ARGS 8,0,0
8842 movl %eax,%eax /* zero extension */
8843 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8844 movq %rcx,RIP-ARGOFFSET(%rsp)
8845@@ -301,10 +359,17 @@ ENTRY(ia32_cstar_target)
8846 /* no need to do an access_ok check here because r8 has been
8847 32bit zero extended */
8848 /* hardware stack frame is complete now */
8849+
8850+#ifdef CONFIG_PAX_MEMORY_UDEREF
8851+ mov $PAX_USER_SHADOW_BASE,%r11
8852+ add %r11,%r8
8853+#endif
8854+
8855 1: movl (%r8),%r9d
8856 _ASM_EXTABLE(1b,ia32_badarg)
8857- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8858- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8859+ GET_THREAD_INFO(%r11)
8860+ orl $TS_COMPAT,TI_status(%r11)
8861+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8862 CFI_REMEMBER_STATE
8863 jnz cstar_tracesys
8864 cmpq $IA32_NR_syscalls-1,%rax
8865@@ -314,12 +379,15 @@ cstar_do_call:
8866 cstar_dispatch:
8867 call *ia32_sys_call_table(,%rax,8)
8868 movq %rax,RAX-ARGOFFSET(%rsp)
8869+ GET_THREAD_INFO(%r11)
8870 DISABLE_INTERRUPTS(CLBR_NONE)
8871 TRACE_IRQS_OFF
8872- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8873+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8874 jnz sysretl_audit
8875 sysretl_from_sys_call:
8876- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8877+ pax_exit_kernel_user
8878+ pax_erase_kstack
8879+ andl $~TS_COMPAT,TI_status(%r11)
8880 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8881 movl RIP-ARGOFFSET(%rsp),%ecx
8882 CFI_REGISTER rip,rcx
8883@@ -347,7 +415,7 @@ sysretl_audit:
8884
8885 cstar_tracesys:
8886 #ifdef CONFIG_AUDITSYSCALL
8887- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8888+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8889 jz cstar_auditsys
8890 #endif
8891 xchgl %r9d,%ebp
8892@@ -361,6 +429,9 @@ cstar_tracesys:
8893 xchgl %ebp,%r9d
8894 cmpq $(IA32_NR_syscalls-1),%rax
8895 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
8896+
8897+ pax_erase_kstack
8898+
8899 jmp cstar_do_call
8900 END(ia32_cstar_target)
8901
8902@@ -401,19 +472,26 @@ ENTRY(ia32_syscall)
8903 CFI_REL_OFFSET rip,RIP-RIP
8904 PARAVIRT_ADJUST_EXCEPTION_FRAME
8905 SWAPGS
8906- /*
8907- * No need to follow this irqs on/off section: the syscall
8908- * disabled irqs and here we enable it straight after entry:
8909- */
8910- ENABLE_INTERRUPTS(CLBR_NONE)
8911 movl %eax,%eax
8912 pushq_cfi %rax
8913 cld
8914 /* note the registers are not zero extended to the sf.
8915 this could be a problem. */
8916 SAVE_ARGS 0,1,0
8917- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8918- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8919+ pax_enter_kernel_user
8920+
8921+#ifdef CONFIG_PAX_RANDKSTACK
8922+ pax_erase_kstack
8923+#endif
8924+
8925+ /*
8926+ * No need to follow this irqs on/off section: the syscall
8927+ * disabled irqs and here we enable it straight after entry:
8928+ */
8929+ ENABLE_INTERRUPTS(CLBR_NONE)
8930+ GET_THREAD_INFO(%r11)
8931+ orl $TS_COMPAT,TI_status(%r11)
8932+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8933 jnz ia32_tracesys
8934 cmpq $(IA32_NR_syscalls-1),%rax
8935 ja ia32_badsys
8936@@ -436,6 +514,9 @@ ia32_tracesys:
8937 RESTORE_REST
8938 cmpq $(IA32_NR_syscalls-1),%rax
8939 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
8940+
8941+ pax_erase_kstack
8942+
8943 jmp ia32_do_call
8944 END(ia32_syscall)
8945
8946diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8947index 4540bec..714d913 100644
8948--- a/arch/x86/ia32/sys_ia32.c
8949+++ b/arch/x86/ia32/sys_ia32.c
8950@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8951 */
8952 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8953 {
8954- typeof(ubuf->st_uid) uid = 0;
8955- typeof(ubuf->st_gid) gid = 0;
8956+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
8957+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
8958 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
8959 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
8960 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8961@@ -287,7 +287,7 @@ asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
8962 return ret;
8963 }
8964
8965-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8966+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8967 int options)
8968 {
8969 return compat_sys_wait4(pid, stat_addr, options, NULL);
8970@@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8971 mm_segment_t old_fs = get_fs();
8972
8973 set_fs(KERNEL_DS);
8974- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8975+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8976 set_fs(old_fs);
8977 if (put_compat_timespec(&t, interval))
8978 return -EFAULT;
8979@@ -319,7 +319,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8980 mm_segment_t old_fs = get_fs();
8981
8982 set_fs(KERNEL_DS);
8983- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8984+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8985 set_fs(old_fs);
8986 if (!ret) {
8987 switch (_NSIG_WORDS) {
8988@@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8989 if (copy_siginfo_from_user32(&info, uinfo))
8990 return -EFAULT;
8991 set_fs(KERNEL_DS);
8992- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8993+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8994 set_fs(old_fs);
8995 return ret;
8996 }
8997@@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8998 return -EFAULT;
8999
9000 set_fs(KERNEL_DS);
9001- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9002+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9003 count);
9004 set_fs(old_fs);
9005
9006diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9007index 952bd01..7692c6f 100644
9008--- a/arch/x86/include/asm/alternative-asm.h
9009+++ b/arch/x86/include/asm/alternative-asm.h
9010@@ -15,6 +15,45 @@
9011 .endm
9012 #endif
9013
9014+#ifdef KERNEXEC_PLUGIN
9015+ .macro pax_force_retaddr_bts rip=0
9016+ btsq $63,\rip(%rsp)
9017+ .endm
9018+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9019+ .macro pax_force_retaddr rip=0, reload=0
9020+ btsq $63,\rip(%rsp)
9021+ .endm
9022+ .macro pax_force_fptr ptr
9023+ btsq $63,\ptr
9024+ .endm
9025+ .macro pax_set_fptr_mask
9026+ .endm
9027+#endif
9028+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9029+ .macro pax_force_retaddr rip=0, reload=0
9030+ .if \reload
9031+ pax_set_fptr_mask
9032+ .endif
9033+ orq %r10,\rip(%rsp)
9034+ .endm
9035+ .macro pax_force_fptr ptr
9036+ orq %r10,\ptr
9037+ .endm
9038+ .macro pax_set_fptr_mask
9039+ movabs $0x8000000000000000,%r10
9040+ .endm
9041+#endif
9042+#else
9043+ .macro pax_force_retaddr rip=0, reload=0
9044+ .endm
9045+ .macro pax_force_fptr ptr
9046+ .endm
9047+ .macro pax_force_retaddr_bts rip=0
9048+ .endm
9049+ .macro pax_set_fptr_mask
9050+ .endm
9051+#endif
9052+
9053 .macro altinstruction_entry orig alt feature orig_len alt_len
9054 .long \orig - .
9055 .long \alt - .
9056diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9057index 49331be..9706065 100644
9058--- a/arch/x86/include/asm/alternative.h
9059+++ b/arch/x86/include/asm/alternative.h
9060@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
9061 ".section .discard,\"aw\",@progbits\n" \
9062 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
9063 ".previous\n" \
9064- ".section .altinstr_replacement, \"ax\"\n" \
9065+ ".section .altinstr_replacement, \"a\"\n" \
9066 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9067 ".previous"
9068
9069diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9070index eaff479..1eff9b5 100644
9071--- a/arch/x86/include/asm/apic.h
9072+++ b/arch/x86/include/asm/apic.h
9073@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
9074
9075 #ifdef CONFIG_X86_LOCAL_APIC
9076
9077-extern unsigned int apic_verbosity;
9078+extern int apic_verbosity;
9079 extern int local_apic_timer_c2_ok;
9080
9081 extern int disable_apic;
9082@@ -390,7 +390,7 @@ struct apic {
9083 */
9084 int (*x86_32_numa_cpu_node)(int cpu);
9085 #endif
9086-};
9087+} __do_const;
9088
9089 /*
9090 * Pointer to the local APIC driver in use on this system (there's
9091diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9092index 20370c6..a2eb9b0 100644
9093--- a/arch/x86/include/asm/apm.h
9094+++ b/arch/x86/include/asm/apm.h
9095@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9096 __asm__ __volatile__(APM_DO_ZERO_SEGS
9097 "pushl %%edi\n\t"
9098 "pushl %%ebp\n\t"
9099- "lcall *%%cs:apm_bios_entry\n\t"
9100+ "lcall *%%ss:apm_bios_entry\n\t"
9101 "setc %%al\n\t"
9102 "popl %%ebp\n\t"
9103 "popl %%edi\n\t"
9104@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9105 __asm__ __volatile__(APM_DO_ZERO_SEGS
9106 "pushl %%edi\n\t"
9107 "pushl %%ebp\n\t"
9108- "lcall *%%cs:apm_bios_entry\n\t"
9109+ "lcall *%%ss:apm_bios_entry\n\t"
9110 "setc %%bl\n\t"
9111 "popl %%ebp\n\t"
9112 "popl %%edi\n\t"
9113diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9114index 58cb6d4..a4b806c 100644
9115--- a/arch/x86/include/asm/atomic.h
9116+++ b/arch/x86/include/asm/atomic.h
9117@@ -22,7 +22,18 @@
9118 */
9119 static inline int atomic_read(const atomic_t *v)
9120 {
9121- return (*(volatile int *)&(v)->counter);
9122+ return (*(volatile const int *)&(v)->counter);
9123+}
9124+
9125+/**
9126+ * atomic_read_unchecked - read atomic variable
9127+ * @v: pointer of type atomic_unchecked_t
9128+ *
9129+ * Atomically reads the value of @v.
9130+ */
9131+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9132+{
9133+ return (*(volatile const int *)&(v)->counter);
9134 }
9135
9136 /**
9137@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9138 }
9139
9140 /**
9141+ * atomic_set_unchecked - set atomic variable
9142+ * @v: pointer of type atomic_unchecked_t
9143+ * @i: required value
9144+ *
9145+ * Atomically sets the value of @v to @i.
9146+ */
9147+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9148+{
9149+ v->counter = i;
9150+}
9151+
9152+/**
9153 * atomic_add - add integer to atomic variable
9154 * @i: integer value to add
9155 * @v: pointer of type atomic_t
9156@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9157 */
9158 static inline void atomic_add(int i, atomic_t *v)
9159 {
9160- asm volatile(LOCK_PREFIX "addl %1,%0"
9161+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9162+
9163+#ifdef CONFIG_PAX_REFCOUNT
9164+ "jno 0f\n"
9165+ LOCK_PREFIX "subl %1,%0\n"
9166+ "int $4\n0:\n"
9167+ _ASM_EXTABLE(0b, 0b)
9168+#endif
9169+
9170+ : "+m" (v->counter)
9171+ : "ir" (i));
9172+}
9173+
9174+/**
9175+ * atomic_add_unchecked - add integer to atomic variable
9176+ * @i: integer value to add
9177+ * @v: pointer of type atomic_unchecked_t
9178+ *
9179+ * Atomically adds @i to @v.
9180+ */
9181+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9182+{
9183+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9184 : "+m" (v->counter)
9185 : "ir" (i));
9186 }
9187@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
9188 */
9189 static inline void atomic_sub(int i, atomic_t *v)
9190 {
9191- asm volatile(LOCK_PREFIX "subl %1,%0"
9192+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9193+
9194+#ifdef CONFIG_PAX_REFCOUNT
9195+ "jno 0f\n"
9196+ LOCK_PREFIX "addl %1,%0\n"
9197+ "int $4\n0:\n"
9198+ _ASM_EXTABLE(0b, 0b)
9199+#endif
9200+
9201+ : "+m" (v->counter)
9202+ : "ir" (i));
9203+}
9204+
9205+/**
9206+ * atomic_sub_unchecked - subtract integer from atomic variable
9207+ * @i: integer value to subtract
9208+ * @v: pointer of type atomic_unchecked_t
9209+ *
9210+ * Atomically subtracts @i from @v.
9211+ */
9212+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9213+{
9214+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9215 : "+m" (v->counter)
9216 : "ir" (i));
9217 }
9218@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9219 {
9220 unsigned char c;
9221
9222- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9223+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
9224+
9225+#ifdef CONFIG_PAX_REFCOUNT
9226+ "jno 0f\n"
9227+ LOCK_PREFIX "addl %2,%0\n"
9228+ "int $4\n0:\n"
9229+ _ASM_EXTABLE(0b, 0b)
9230+#endif
9231+
9232+ "sete %1\n"
9233 : "+m" (v->counter), "=qm" (c)
9234 : "ir" (i) : "memory");
9235 return c;
9236@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9237 */
9238 static inline void atomic_inc(atomic_t *v)
9239 {
9240- asm volatile(LOCK_PREFIX "incl %0"
9241+ asm volatile(LOCK_PREFIX "incl %0\n"
9242+
9243+#ifdef CONFIG_PAX_REFCOUNT
9244+ "jno 0f\n"
9245+ LOCK_PREFIX "decl %0\n"
9246+ "int $4\n0:\n"
9247+ _ASM_EXTABLE(0b, 0b)
9248+#endif
9249+
9250+ : "+m" (v->counter));
9251+}
9252+
9253+/**
9254+ * atomic_inc_unchecked - increment atomic variable
9255+ * @v: pointer of type atomic_unchecked_t
9256+ *
9257+ * Atomically increments @v by 1.
9258+ */
9259+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9260+{
9261+ asm volatile(LOCK_PREFIX "incl %0\n"
9262 : "+m" (v->counter));
9263 }
9264
9265@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9266 */
9267 static inline void atomic_dec(atomic_t *v)
9268 {
9269- asm volatile(LOCK_PREFIX "decl %0"
9270+ asm volatile(LOCK_PREFIX "decl %0\n"
9271+
9272+#ifdef CONFIG_PAX_REFCOUNT
9273+ "jno 0f\n"
9274+ LOCK_PREFIX "incl %0\n"
9275+ "int $4\n0:\n"
9276+ _ASM_EXTABLE(0b, 0b)
9277+#endif
9278+
9279+ : "+m" (v->counter));
9280+}
9281+
9282+/**
9283+ * atomic_dec_unchecked - decrement atomic variable
9284+ * @v: pointer of type atomic_unchecked_t
9285+ *
9286+ * Atomically decrements @v by 1.
9287+ */
9288+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9289+{
9290+ asm volatile(LOCK_PREFIX "decl %0\n"
9291 : "+m" (v->counter));
9292 }
9293
9294@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9295 {
9296 unsigned char c;
9297
9298- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9299+ asm volatile(LOCK_PREFIX "decl %0\n"
9300+
9301+#ifdef CONFIG_PAX_REFCOUNT
9302+ "jno 0f\n"
9303+ LOCK_PREFIX "incl %0\n"
9304+ "int $4\n0:\n"
9305+ _ASM_EXTABLE(0b, 0b)
9306+#endif
9307+
9308+ "sete %1\n"
9309 : "+m" (v->counter), "=qm" (c)
9310 : : "memory");
9311 return c != 0;
9312@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9313 {
9314 unsigned char c;
9315
9316- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9317+ asm volatile(LOCK_PREFIX "incl %0\n"
9318+
9319+#ifdef CONFIG_PAX_REFCOUNT
9320+ "jno 0f\n"
9321+ LOCK_PREFIX "decl %0\n"
9322+ "int $4\n0:\n"
9323+ _ASM_EXTABLE(0b, 0b)
9324+#endif
9325+
9326+ "sete %1\n"
9327+ : "+m" (v->counter), "=qm" (c)
9328+ : : "memory");
9329+ return c != 0;
9330+}
9331+
9332+/**
9333+ * atomic_inc_and_test_unchecked - increment and test
9334+ * @v: pointer of type atomic_unchecked_t
9335+ *
9336+ * Atomically increments @v by 1
9337+ * and returns true if the result is zero, or false for all
9338+ * other cases.
9339+ */
9340+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9341+{
9342+ unsigned char c;
9343+
9344+ asm volatile(LOCK_PREFIX "incl %0\n"
9345+ "sete %1\n"
9346 : "+m" (v->counter), "=qm" (c)
9347 : : "memory");
9348 return c != 0;
9349@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9350 {
9351 unsigned char c;
9352
9353- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9354+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9355+
9356+#ifdef CONFIG_PAX_REFCOUNT
9357+ "jno 0f\n"
9358+ LOCK_PREFIX "subl %2,%0\n"
9359+ "int $4\n0:\n"
9360+ _ASM_EXTABLE(0b, 0b)
9361+#endif
9362+
9363+ "sets %1\n"
9364 : "+m" (v->counter), "=qm" (c)
9365 : "ir" (i) : "memory");
9366 return c;
9367@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9368 goto no_xadd;
9369 #endif
9370 /* Modern 486+ processor */
9371- return i + xadd(&v->counter, i);
9372+ return i + xadd_check_overflow(&v->counter, i);
9373
9374 #ifdef CONFIG_M386
9375 no_xadd: /* Legacy 386 processor */
9376@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9377 }
9378
9379 /**
9380+ * atomic_add_return_unchecked - add integer and return
9381+ * @i: integer value to add
9382+ * @v: pointer of type atomic_unchecked_t
9383+ *
9384+ * Atomically adds @i to @v and returns @i + @v
9385+ */
9386+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9387+{
9388+#ifdef CONFIG_M386
9389+ int __i;
9390+ unsigned long flags;
9391+ if (unlikely(boot_cpu_data.x86 <= 3))
9392+ goto no_xadd;
9393+#endif
9394+ /* Modern 486+ processor */
9395+ return i + xadd(&v->counter, i);
9396+
9397+#ifdef CONFIG_M386
9398+no_xadd: /* Legacy 386 processor */
9399+ raw_local_irq_save(flags);
9400+ __i = atomic_read_unchecked(v);
9401+ atomic_set_unchecked(v, i + __i);
9402+ raw_local_irq_restore(flags);
9403+ return i + __i;
9404+#endif
9405+}
9406+
9407+/**
9408 * atomic_sub_return - subtract integer and return
9409 * @v: pointer of type atomic_t
9410 * @i: integer value to subtract
9411@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9412 }
9413
9414 #define atomic_inc_return(v) (atomic_add_return(1, v))
9415+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9416+{
9417+ return atomic_add_return_unchecked(1, v);
9418+}
9419 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9420
9421 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9422@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9423 return cmpxchg(&v->counter, old, new);
9424 }
9425
9426+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9427+{
9428+ return cmpxchg(&v->counter, old, new);
9429+}
9430+
9431 static inline int atomic_xchg(atomic_t *v, int new)
9432 {
9433 return xchg(&v->counter, new);
9434 }
9435
9436+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9437+{
9438+ return xchg(&v->counter, new);
9439+}
9440+
9441 /**
9442 * __atomic_add_unless - add unless the number is already a given value
9443 * @v: pointer of type atomic_t
9444@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9445 */
9446 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9447 {
9448- int c, old;
9449+ int c, old, new;
9450 c = atomic_read(v);
9451 for (;;) {
9452- if (unlikely(c == (u)))
9453+ if (unlikely(c == u))
9454 break;
9455- old = atomic_cmpxchg((v), c, c + (a));
9456+
9457+ asm volatile("addl %2,%0\n"
9458+
9459+#ifdef CONFIG_PAX_REFCOUNT
9460+ "jno 0f\n"
9461+ "subl %2,%0\n"
9462+ "int $4\n0:\n"
9463+ _ASM_EXTABLE(0b, 0b)
9464+#endif
9465+
9466+ : "=r" (new)
9467+ : "0" (c), "ir" (a));
9468+
9469+ old = atomic_cmpxchg(v, c, new);
9470 if (likely(old == c))
9471 break;
9472 c = old;
9473@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9474 return c;
9475 }
9476
9477+/**
9478+ * atomic_inc_not_zero_hint - increment if not null
9479+ * @v: pointer of type atomic_t
9480+ * @hint: probable value of the atomic before the increment
9481+ *
9482+ * This version of atomic_inc_not_zero() gives a hint of probable
9483+ * value of the atomic. This helps processor to not read the memory
9484+ * before doing the atomic read/modify/write cycle, lowering
9485+ * number of bus transactions on some arches.
9486+ *
9487+ * Returns: 0 if increment was not done, 1 otherwise.
9488+ */
9489+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9490+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9491+{
9492+ int val, c = hint, new;
9493+
9494+ /* sanity test, should be removed by compiler if hint is a constant */
9495+ if (!hint)
9496+ return __atomic_add_unless(v, 1, 0);
9497+
9498+ do {
9499+ asm volatile("incl %0\n"
9500+
9501+#ifdef CONFIG_PAX_REFCOUNT
9502+ "jno 0f\n"
9503+ "decl %0\n"
9504+ "int $4\n0:\n"
9505+ _ASM_EXTABLE(0b, 0b)
9506+#endif
9507+
9508+ : "=r" (new)
9509+ : "0" (c));
9510+
9511+ val = atomic_cmpxchg(v, c, new);
9512+ if (val == c)
9513+ return 1;
9514+ c = val;
9515+ } while (c);
9516+
9517+ return 0;
9518+}
9519
9520 /*
9521 * atomic_dec_if_positive - decrement by 1 if old value positive
9522@@ -293,14 +552,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
9523 #endif
9524
9525 /* These are x86-specific, used by some header files */
9526-#define atomic_clear_mask(mask, addr) \
9527- asm volatile(LOCK_PREFIX "andl %0,%1" \
9528- : : "r" (~(mask)), "m" (*(addr)) : "memory")
9529+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
9530+{
9531+ asm volatile(LOCK_PREFIX "andl %1,%0"
9532+ : "+m" (v->counter)
9533+ : "r" (~(mask))
9534+ : "memory");
9535+}
9536
9537-#define atomic_set_mask(mask, addr) \
9538- asm volatile(LOCK_PREFIX "orl %0,%1" \
9539- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
9540- : "memory")
9541+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
9542+{
9543+ asm volatile(LOCK_PREFIX "andl %1,%0"
9544+ : "+m" (v->counter)
9545+ : "r" (~(mask))
9546+ : "memory");
9547+}
9548+
9549+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
9550+{
9551+ asm volatile(LOCK_PREFIX "orl %1,%0"
9552+ : "+m" (v->counter)
9553+ : "r" (mask)
9554+ : "memory");
9555+}
9556+
9557+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
9558+{
9559+ asm volatile(LOCK_PREFIX "orl %1,%0"
9560+ : "+m" (v->counter)
9561+ : "r" (mask)
9562+ : "memory");
9563+}
9564
9565 /* Atomic operations are already serializing on x86 */
9566 #define smp_mb__before_atomic_dec() barrier()
9567diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9568index b154de7..aadebd8 100644
9569--- a/arch/x86/include/asm/atomic64_32.h
9570+++ b/arch/x86/include/asm/atomic64_32.h
9571@@ -12,6 +12,14 @@ typedef struct {
9572 u64 __aligned(8) counter;
9573 } atomic64_t;
9574
9575+#ifdef CONFIG_PAX_REFCOUNT
9576+typedef struct {
9577+ u64 __aligned(8) counter;
9578+} atomic64_unchecked_t;
9579+#else
9580+typedef atomic64_t atomic64_unchecked_t;
9581+#endif
9582+
9583 #define ATOMIC64_INIT(val) { (val) }
9584
9585 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
9586@@ -37,21 +45,31 @@ typedef struct {
9587 ATOMIC64_DECL_ONE(sym##_386)
9588
9589 ATOMIC64_DECL_ONE(add_386);
9590+ATOMIC64_DECL_ONE(add_unchecked_386);
9591 ATOMIC64_DECL_ONE(sub_386);
9592+ATOMIC64_DECL_ONE(sub_unchecked_386);
9593 ATOMIC64_DECL_ONE(inc_386);
9594+ATOMIC64_DECL_ONE(inc_unchecked_386);
9595 ATOMIC64_DECL_ONE(dec_386);
9596+ATOMIC64_DECL_ONE(dec_unchecked_386);
9597 #endif
9598
9599 #define alternative_atomic64(f, out, in...) \
9600 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
9601
9602 ATOMIC64_DECL(read);
9603+ATOMIC64_DECL(read_unchecked);
9604 ATOMIC64_DECL(set);
9605+ATOMIC64_DECL(set_unchecked);
9606 ATOMIC64_DECL(xchg);
9607 ATOMIC64_DECL(add_return);
9608+ATOMIC64_DECL(add_return_unchecked);
9609 ATOMIC64_DECL(sub_return);
9610+ATOMIC64_DECL(sub_return_unchecked);
9611 ATOMIC64_DECL(inc_return);
9612+ATOMIC64_DECL(inc_return_unchecked);
9613 ATOMIC64_DECL(dec_return);
9614+ATOMIC64_DECL(dec_return_unchecked);
9615 ATOMIC64_DECL(dec_if_positive);
9616 ATOMIC64_DECL(inc_not_zero);
9617 ATOMIC64_DECL(add_unless);
9618@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9619 }
9620
9621 /**
9622+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9623+ * @p: pointer to type atomic64_unchecked_t
9624+ * @o: expected value
9625+ * @n: new value
9626+ *
9627+ * Atomically sets @v to @n if it was equal to @o and returns
9628+ * the old value.
9629+ */
9630+
9631+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9632+{
9633+ return cmpxchg64(&v->counter, o, n);
9634+}
9635+
9636+/**
9637 * atomic64_xchg - xchg atomic64 variable
9638 * @v: pointer to type atomic64_t
9639 * @n: value to assign
9640@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9641 }
9642
9643 /**
9644+ * atomic64_set_unchecked - set atomic64 variable
9645+ * @v: pointer to type atomic64_unchecked_t
9646+ * @n: value to assign
9647+ *
9648+ * Atomically sets the value of @v to @n.
9649+ */
9650+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9651+{
9652+ unsigned high = (unsigned)(i >> 32);
9653+ unsigned low = (unsigned)i;
9654+ alternative_atomic64(set, /* no output */,
9655+ "S" (v), "b" (low), "c" (high)
9656+ : "eax", "edx", "memory");
9657+}
9658+
9659+/**
9660 * atomic64_read - read atomic64 variable
9661 * @v: pointer to type atomic64_t
9662 *
9663@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
9664 }
9665
9666 /**
9667+ * atomic64_read_unchecked - read atomic64 variable
9668+ * @v: pointer to type atomic64_unchecked_t
9669+ *
9670+ * Atomically reads the value of @v and returns it.
9671+ */
9672+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9673+{
9674+ long long r;
9675+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
9676+ return r;
9677+ }
9678+
9679+/**
9680 * atomic64_add_return - add and return
9681 * @i: integer value to add
9682 * @v: pointer to type atomic64_t
9683@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9684 return i;
9685 }
9686
9687+/**
9688+ * atomic64_add_return_unchecked - add and return
9689+ * @i: integer value to add
9690+ * @v: pointer to type atomic64_unchecked_t
9691+ *
9692+ * Atomically adds @i to @v and returns @i + *@v
9693+ */
9694+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9695+{
9696+ alternative_atomic64(add_return_unchecked,
9697+ ASM_OUTPUT2("+A" (i), "+c" (v)),
9698+ ASM_NO_INPUT_CLOBBER("memory"));
9699+ return i;
9700+}
9701+
9702 /*
9703 * Other variants with different arithmetic operators:
9704 */
9705@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9706 return a;
9707 }
9708
9709+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9710+{
9711+ long long a;
9712+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
9713+ "S" (v) : "memory", "ecx");
9714+ return a;
9715+}
9716+
9717 static inline long long atomic64_dec_return(atomic64_t *v)
9718 {
9719 long long a;
9720@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9721 }
9722
9723 /**
9724+ * atomic64_add_unchecked - add integer to atomic64 variable
9725+ * @i: integer value to add
9726+ * @v: pointer to type atomic64_unchecked_t
9727+ *
9728+ * Atomically adds @i to @v.
9729+ */
9730+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9731+{
9732+ __alternative_atomic64(add_unchecked, add_return_unchecked,
9733+ ASM_OUTPUT2("+A" (i), "+c" (v)),
9734+ ASM_NO_INPUT_CLOBBER("memory"));
9735+ return i;
9736+}
9737+
9738+/**
9739 * atomic64_sub - subtract the atomic64 variable
9740 * @i: integer value to subtract
9741 * @v: pointer to type atomic64_t
9742diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9743index 0e1cbfc..5623683 100644
9744--- a/arch/x86/include/asm/atomic64_64.h
9745+++ b/arch/x86/include/asm/atomic64_64.h
9746@@ -18,7 +18,19 @@
9747 */
9748 static inline long atomic64_read(const atomic64_t *v)
9749 {
9750- return (*(volatile long *)&(v)->counter);
9751+ return (*(volatile const long *)&(v)->counter);
9752+}
9753+
9754+/**
9755+ * atomic64_read_unchecked - read atomic64 variable
9756+ * @v: pointer of type atomic64_unchecked_t
9757+ *
9758+ * Atomically reads the value of @v.
9759+ * Doesn't imply a read memory barrier.
9760+ */
9761+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9762+{
9763+ return (*(volatile const long *)&(v)->counter);
9764 }
9765
9766 /**
9767@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9768 }
9769
9770 /**
9771+ * atomic64_set_unchecked - set atomic64 variable
9772+ * @v: pointer to type atomic64_unchecked_t
9773+ * @i: required value
9774+ *
9775+ * Atomically sets the value of @v to @i.
9776+ */
9777+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9778+{
9779+ v->counter = i;
9780+}
9781+
9782+/**
9783 * atomic64_add - add integer to atomic64 variable
9784 * @i: integer value to add
9785 * @v: pointer to type atomic64_t
9786@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9787 */
9788 static inline void atomic64_add(long i, atomic64_t *v)
9789 {
9790+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9791+
9792+#ifdef CONFIG_PAX_REFCOUNT
9793+ "jno 0f\n"
9794+ LOCK_PREFIX "subq %1,%0\n"
9795+ "int $4\n0:\n"
9796+ _ASM_EXTABLE(0b, 0b)
9797+#endif
9798+
9799+ : "=m" (v->counter)
9800+ : "er" (i), "m" (v->counter));
9801+}
9802+
9803+/**
9804+ * atomic64_add_unchecked - add integer to atomic64 variable
9805+ * @i: integer value to add
9806+ * @v: pointer to type atomic64_unchecked_t
9807+ *
9808+ * Atomically adds @i to @v.
9809+ */
9810+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9811+{
9812 asm volatile(LOCK_PREFIX "addq %1,%0"
9813 : "=m" (v->counter)
9814 : "er" (i), "m" (v->counter));
9815@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9816 */
9817 static inline void atomic64_sub(long i, atomic64_t *v)
9818 {
9819- asm volatile(LOCK_PREFIX "subq %1,%0"
9820+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9821+
9822+#ifdef CONFIG_PAX_REFCOUNT
9823+ "jno 0f\n"
9824+ LOCK_PREFIX "addq %1,%0\n"
9825+ "int $4\n0:\n"
9826+ _ASM_EXTABLE(0b, 0b)
9827+#endif
9828+
9829+ : "=m" (v->counter)
9830+ : "er" (i), "m" (v->counter));
9831+}
9832+
9833+/**
9834+ * atomic64_sub_unchecked - subtract the atomic64 variable
9835+ * @i: integer value to subtract
9836+ * @v: pointer to type atomic64_unchecked_t
9837+ *
9838+ * Atomically subtracts @i from @v.
9839+ */
9840+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9841+{
9842+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9843 : "=m" (v->counter)
9844 : "er" (i), "m" (v->counter));
9845 }
9846@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9847 {
9848 unsigned char c;
9849
9850- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9851+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9852+
9853+#ifdef CONFIG_PAX_REFCOUNT
9854+ "jno 0f\n"
9855+ LOCK_PREFIX "addq %2,%0\n"
9856+ "int $4\n0:\n"
9857+ _ASM_EXTABLE(0b, 0b)
9858+#endif
9859+
9860+ "sete %1\n"
9861 : "=m" (v->counter), "=qm" (c)
9862 : "er" (i), "m" (v->counter) : "memory");
9863 return c;
9864@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9865 */
9866 static inline void atomic64_inc(atomic64_t *v)
9867 {
9868+ asm volatile(LOCK_PREFIX "incq %0\n"
9869+
9870+#ifdef CONFIG_PAX_REFCOUNT
9871+ "jno 0f\n"
9872+ LOCK_PREFIX "decq %0\n"
9873+ "int $4\n0:\n"
9874+ _ASM_EXTABLE(0b, 0b)
9875+#endif
9876+
9877+ : "=m" (v->counter)
9878+ : "m" (v->counter));
9879+}
9880+
9881+/**
9882+ * atomic64_inc_unchecked - increment atomic64 variable
9883+ * @v: pointer to type atomic64_unchecked_t
9884+ *
9885+ * Atomically increments @v by 1.
9886+ */
9887+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9888+{
9889 asm volatile(LOCK_PREFIX "incq %0"
9890 : "=m" (v->counter)
9891 : "m" (v->counter));
9892@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9893 */
9894 static inline void atomic64_dec(atomic64_t *v)
9895 {
9896- asm volatile(LOCK_PREFIX "decq %0"
9897+ asm volatile(LOCK_PREFIX "decq %0\n"
9898+
9899+#ifdef CONFIG_PAX_REFCOUNT
9900+ "jno 0f\n"
9901+ LOCK_PREFIX "incq %0\n"
9902+ "int $4\n0:\n"
9903+ _ASM_EXTABLE(0b, 0b)
9904+#endif
9905+
9906+ : "=m" (v->counter)
9907+ : "m" (v->counter));
9908+}
9909+
9910+/**
9911+ * atomic64_dec_unchecked - decrement atomic64 variable
9912+ * @v: pointer to type atomic64_t
9913+ *
9914+ * Atomically decrements @v by 1.
9915+ */
9916+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9917+{
9918+ asm volatile(LOCK_PREFIX "decq %0\n"
9919 : "=m" (v->counter)
9920 : "m" (v->counter));
9921 }
9922@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9923 {
9924 unsigned char c;
9925
9926- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9927+ asm volatile(LOCK_PREFIX "decq %0\n"
9928+
9929+#ifdef CONFIG_PAX_REFCOUNT
9930+ "jno 0f\n"
9931+ LOCK_PREFIX "incq %0\n"
9932+ "int $4\n0:\n"
9933+ _ASM_EXTABLE(0b, 0b)
9934+#endif
9935+
9936+ "sete %1\n"
9937 : "=m" (v->counter), "=qm" (c)
9938 : "m" (v->counter) : "memory");
9939 return c != 0;
9940@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9941 {
9942 unsigned char c;
9943
9944- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9945+ asm volatile(LOCK_PREFIX "incq %0\n"
9946+
9947+#ifdef CONFIG_PAX_REFCOUNT
9948+ "jno 0f\n"
9949+ LOCK_PREFIX "decq %0\n"
9950+ "int $4\n0:\n"
9951+ _ASM_EXTABLE(0b, 0b)
9952+#endif
9953+
9954+ "sete %1\n"
9955 : "=m" (v->counter), "=qm" (c)
9956 : "m" (v->counter) : "memory");
9957 return c != 0;
9958@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9959 {
9960 unsigned char c;
9961
9962- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9963+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9964+
9965+#ifdef CONFIG_PAX_REFCOUNT
9966+ "jno 0f\n"
9967+ LOCK_PREFIX "subq %2,%0\n"
9968+ "int $4\n0:\n"
9969+ _ASM_EXTABLE(0b, 0b)
9970+#endif
9971+
9972+ "sets %1\n"
9973 : "=m" (v->counter), "=qm" (c)
9974 : "er" (i), "m" (v->counter) : "memory");
9975 return c;
9976@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9977 */
9978 static inline long atomic64_add_return(long i, atomic64_t *v)
9979 {
9980+ return i + xadd_check_overflow(&v->counter, i);
9981+}
9982+
9983+/**
9984+ * atomic64_add_return_unchecked - add and return
9985+ * @i: integer value to add
9986+ * @v: pointer to type atomic64_unchecked_t
9987+ *
9988+ * Atomically adds @i to @v and returns @i + @v
9989+ */
9990+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9991+{
9992 return i + xadd(&v->counter, i);
9993 }
9994
9995@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9996 }
9997
9998 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9999+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10000+{
10001+ return atomic64_add_return_unchecked(1, v);
10002+}
10003 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10004
10005 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10006@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10007 return cmpxchg(&v->counter, old, new);
10008 }
10009
10010+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10011+{
10012+ return cmpxchg(&v->counter, old, new);
10013+}
10014+
10015 static inline long atomic64_xchg(atomic64_t *v, long new)
10016 {
10017 return xchg(&v->counter, new);
10018@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
10019 */
10020 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10021 {
10022- long c, old;
10023+ long c, old, new;
10024 c = atomic64_read(v);
10025 for (;;) {
10026- if (unlikely(c == (u)))
10027+ if (unlikely(c == u))
10028 break;
10029- old = atomic64_cmpxchg((v), c, c + (a));
10030+
10031+ asm volatile("add %2,%0\n"
10032+
10033+#ifdef CONFIG_PAX_REFCOUNT
10034+ "jno 0f\n"
10035+ "sub %2,%0\n"
10036+ "int $4\n0:\n"
10037+ _ASM_EXTABLE(0b, 0b)
10038+#endif
10039+
10040+ : "=r" (new)
10041+ : "0" (c), "ir" (a));
10042+
10043+ old = atomic64_cmpxchg(v, c, new);
10044 if (likely(old == c))
10045 break;
10046 c = old;
10047 }
10048- return c != (u);
10049+ return c != u;
10050 }
10051
10052 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10053diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10054index a6983b2..63f48a2 100644
10055--- a/arch/x86/include/asm/bitops.h
10056+++ b/arch/x86/include/asm/bitops.h
10057@@ -40,7 +40,7 @@
10058 * a mask operation on a byte.
10059 */
10060 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10061-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10062+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10063 #define CONST_MASK(nr) (1 << ((nr) & 7))
10064
10065 /**
10066diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10067index b13fe63..0dab13a 100644
10068--- a/arch/x86/include/asm/boot.h
10069+++ b/arch/x86/include/asm/boot.h
10070@@ -11,10 +11,15 @@
10071 #include <asm/pgtable_types.h>
10072
10073 /* Physical address where kernel should be loaded. */
10074-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10075+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10076 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10077 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10078
10079+#ifndef __ASSEMBLY__
10080+extern unsigned char __LOAD_PHYSICAL_ADDR[];
10081+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10082+#endif
10083+
10084 /* Minimum kernel alignment, as a power of two */
10085 #ifdef CONFIG_X86_64
10086 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10087diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10088index 48f99f1..d78ebf9 100644
10089--- a/arch/x86/include/asm/cache.h
10090+++ b/arch/x86/include/asm/cache.h
10091@@ -5,12 +5,13 @@
10092
10093 /* L1 cache line size */
10094 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10095-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10096+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10097
10098 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10099+#define __read_only __attribute__((__section__(".data..read_only")))
10100
10101 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
10102-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
10103+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
10104
10105 #ifdef CONFIG_X86_VSMP
10106 #ifdef CONFIG_SMP
10107diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10108index 9863ee3..4a1f8e1 100644
10109--- a/arch/x86/include/asm/cacheflush.h
10110+++ b/arch/x86/include/asm/cacheflush.h
10111@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
10112 unsigned long pg_flags = pg->flags & _PGMT_MASK;
10113
10114 if (pg_flags == _PGMT_DEFAULT)
10115- return -1;
10116+ return ~0UL;
10117 else if (pg_flags == _PGMT_WC)
10118 return _PAGE_CACHE_WC;
10119 else if (pg_flags == _PGMT_UC_MINUS)
10120diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10121index 46fc474..b02b0f9 100644
10122--- a/arch/x86/include/asm/checksum_32.h
10123+++ b/arch/x86/include/asm/checksum_32.h
10124@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10125 int len, __wsum sum,
10126 int *src_err_ptr, int *dst_err_ptr);
10127
10128+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10129+ int len, __wsum sum,
10130+ int *src_err_ptr, int *dst_err_ptr);
10131+
10132+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10133+ int len, __wsum sum,
10134+ int *src_err_ptr, int *dst_err_ptr);
10135+
10136 /*
10137 * Note: when you get a NULL pointer exception here this means someone
10138 * passed in an incorrect kernel address to one of these functions.
10139@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10140 int *err_ptr)
10141 {
10142 might_sleep();
10143- return csum_partial_copy_generic((__force void *)src, dst,
10144+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
10145 len, sum, err_ptr, NULL);
10146 }
10147
10148@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10149 {
10150 might_sleep();
10151 if (access_ok(VERIFY_WRITE, dst, len))
10152- return csum_partial_copy_generic(src, (__force void *)dst,
10153+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10154 len, sum, NULL, err_ptr);
10155
10156 if (len)
10157diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
10158index 99480e5..d81165b 100644
10159--- a/arch/x86/include/asm/cmpxchg.h
10160+++ b/arch/x86/include/asm/cmpxchg.h
10161@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
10162 __compiletime_error("Bad argument size for cmpxchg");
10163 extern void __xadd_wrong_size(void)
10164 __compiletime_error("Bad argument size for xadd");
10165+extern void __xadd_check_overflow_wrong_size(void)
10166+ __compiletime_error("Bad argument size for xadd_check_overflow");
10167 extern void __add_wrong_size(void)
10168 __compiletime_error("Bad argument size for add");
10169+extern void __add_check_overflow_wrong_size(void)
10170+ __compiletime_error("Bad argument size for add_check_overflow");
10171
10172 /*
10173 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
10174@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
10175 __ret; \
10176 })
10177
10178+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
10179+ ({ \
10180+ __typeof__ (*(ptr)) __ret = (arg); \
10181+ switch (sizeof(*(ptr))) { \
10182+ case __X86_CASE_L: \
10183+ asm volatile (lock #op "l %0, %1\n" \
10184+ "jno 0f\n" \
10185+ "mov %0,%1\n" \
10186+ "int $4\n0:\n" \
10187+ _ASM_EXTABLE(0b, 0b) \
10188+ : "+r" (__ret), "+m" (*(ptr)) \
10189+ : : "memory", "cc"); \
10190+ break; \
10191+ case __X86_CASE_Q: \
10192+ asm volatile (lock #op "q %q0, %1\n" \
10193+ "jno 0f\n" \
10194+ "mov %0,%1\n" \
10195+ "int $4\n0:\n" \
10196+ _ASM_EXTABLE(0b, 0b) \
10197+ : "+r" (__ret), "+m" (*(ptr)) \
10198+ : : "memory", "cc"); \
10199+ break; \
10200+ default: \
10201+ __ ## op ## _check_overflow_wrong_size(); \
10202+ } \
10203+ __ret; \
10204+ })
10205+
10206 /*
10207 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10208 * Since this is generally used to protect other memory information, we
10209@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
10210 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10211 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10212
10213+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10214+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
10215+
10216 #define __add(ptr, inc, lock) \
10217 ({ \
10218 __typeof__ (*(ptr)) __ret = (inc); \
10219diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
10220index f91e80f..7731066 100644
10221--- a/arch/x86/include/asm/cpufeature.h
10222+++ b/arch/x86/include/asm/cpufeature.h
10223@@ -202,11 +202,12 @@
10224 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
10225 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
10226 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
10227-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
10228+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
10229 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
10230 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
10231 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
10232 #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */
10233+#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
10234
10235 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
10236
10237@@ -371,7 +372,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10238 ".section .discard,\"aw\",@progbits\n"
10239 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
10240 ".previous\n"
10241- ".section .altinstr_replacement,\"ax\"\n"
10242+ ".section .altinstr_replacement,\"a\"\n"
10243 "3: movb $1,%0\n"
10244 "4:\n"
10245 ".previous\n"
10246diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10247index 8bf1c06..f723dfd 100644
10248--- a/arch/x86/include/asm/desc.h
10249+++ b/arch/x86/include/asm/desc.h
10250@@ -4,6 +4,7 @@
10251 #include <asm/desc_defs.h>
10252 #include <asm/ldt.h>
10253 #include <asm/mmu.h>
10254+#include <asm/pgtable.h>
10255
10256 #include <linux/smp.h>
10257 #include <linux/percpu.h>
10258@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10259
10260 desc->type = (info->read_exec_only ^ 1) << 1;
10261 desc->type |= info->contents << 2;
10262+ desc->type |= info->seg_not_present ^ 1;
10263
10264 desc->s = 1;
10265 desc->dpl = 0x3;
10266@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10267 }
10268
10269 extern struct desc_ptr idt_descr;
10270-extern gate_desc idt_table[];
10271 extern struct desc_ptr nmi_idt_descr;
10272-extern gate_desc nmi_idt_table[];
10273-
10274-struct gdt_page {
10275- struct desc_struct gdt[GDT_ENTRIES];
10276-} __attribute__((aligned(PAGE_SIZE)));
10277-
10278-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10279+extern gate_desc idt_table[256];
10280+extern gate_desc nmi_idt_table[256];
10281
10282+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10283 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10284 {
10285- return per_cpu(gdt_page, cpu).gdt;
10286+ return cpu_gdt_table[cpu];
10287 }
10288
10289 #ifdef CONFIG_X86_64
10290@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10291 unsigned long base, unsigned dpl, unsigned flags,
10292 unsigned short seg)
10293 {
10294- gate->a = (seg << 16) | (base & 0xffff);
10295- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10296+ gate->gate.offset_low = base;
10297+ gate->gate.seg = seg;
10298+ gate->gate.reserved = 0;
10299+ gate->gate.type = type;
10300+ gate->gate.s = 0;
10301+ gate->gate.dpl = dpl;
10302+ gate->gate.p = 1;
10303+ gate->gate.offset_high = base >> 16;
10304 }
10305
10306 #endif
10307@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10308
10309 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10310 {
10311+ pax_open_kernel();
10312 memcpy(&idt[entry], gate, sizeof(*gate));
10313+ pax_close_kernel();
10314 }
10315
10316 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10317 {
10318+ pax_open_kernel();
10319 memcpy(&ldt[entry], desc, 8);
10320+ pax_close_kernel();
10321 }
10322
10323 static inline void
10324@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
10325 default: size = sizeof(*gdt); break;
10326 }
10327
10328+ pax_open_kernel();
10329 memcpy(&gdt[entry], desc, size);
10330+ pax_close_kernel();
10331 }
10332
10333 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10334@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10335
10336 static inline void native_load_tr_desc(void)
10337 {
10338+ pax_open_kernel();
10339 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10340+ pax_close_kernel();
10341 }
10342
10343 static inline void native_load_gdt(const struct desc_ptr *dtr)
10344@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10345 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10346 unsigned int i;
10347
10348+ pax_open_kernel();
10349 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10350 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10351+ pax_close_kernel();
10352 }
10353
10354 #define _LDT_empty(info) \
10355@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10356 }
10357
10358 #ifdef CONFIG_X86_64
10359-static inline void set_nmi_gate(int gate, void *addr)
10360+static inline void set_nmi_gate(int gate, const void *addr)
10361 {
10362 gate_desc s;
10363
10364@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10365 }
10366 #endif
10367
10368-static inline void _set_gate(int gate, unsigned type, void *addr,
10369+static inline void _set_gate(int gate, unsigned type, const void *addr,
10370 unsigned dpl, unsigned ist, unsigned seg)
10371 {
10372 gate_desc s;
10373@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10374 * Pentium F0 0F bugfix can have resulted in the mapped
10375 * IDT being write-protected.
10376 */
10377-static inline void set_intr_gate(unsigned int n, void *addr)
10378+static inline void set_intr_gate(unsigned int n, const void *addr)
10379 {
10380 BUG_ON((unsigned)n > 0xFF);
10381 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10382@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10383 /*
10384 * This routine sets up an interrupt gate at directory privilege level 3.
10385 */
10386-static inline void set_system_intr_gate(unsigned int n, void *addr)
10387+static inline void set_system_intr_gate(unsigned int n, const void *addr)
10388 {
10389 BUG_ON((unsigned)n > 0xFF);
10390 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10391 }
10392
10393-static inline void set_system_trap_gate(unsigned int n, void *addr)
10394+static inline void set_system_trap_gate(unsigned int n, const void *addr)
10395 {
10396 BUG_ON((unsigned)n > 0xFF);
10397 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10398 }
10399
10400-static inline void set_trap_gate(unsigned int n, void *addr)
10401+static inline void set_trap_gate(unsigned int n, const void *addr)
10402 {
10403 BUG_ON((unsigned)n > 0xFF);
10404 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10405@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10406 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10407 {
10408 BUG_ON((unsigned)n > 0xFF);
10409- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10410+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10411 }
10412
10413-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10414+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10415 {
10416 BUG_ON((unsigned)n > 0xFF);
10417 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10418 }
10419
10420-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10421+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10422 {
10423 BUG_ON((unsigned)n > 0xFF);
10424 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10425 }
10426
10427+#ifdef CONFIG_X86_32
10428+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10429+{
10430+ struct desc_struct d;
10431+
10432+ if (likely(limit))
10433+ limit = (limit - 1UL) >> PAGE_SHIFT;
10434+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
10435+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10436+}
10437+#endif
10438+
10439 #endif /* _ASM_X86_DESC_H */
10440diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10441index 278441f..b95a174 100644
10442--- a/arch/x86/include/asm/desc_defs.h
10443+++ b/arch/x86/include/asm/desc_defs.h
10444@@ -31,6 +31,12 @@ struct desc_struct {
10445 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10446 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10447 };
10448+ struct {
10449+ u16 offset_low;
10450+ u16 seg;
10451+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10452+ unsigned offset_high: 16;
10453+ } gate;
10454 };
10455 } __attribute__((packed));
10456
10457diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10458index 3778256..c5d4fce 100644
10459--- a/arch/x86/include/asm/e820.h
10460+++ b/arch/x86/include/asm/e820.h
10461@@ -69,7 +69,7 @@ struct e820map {
10462 #define ISA_START_ADDRESS 0xa0000
10463 #define ISA_END_ADDRESS 0x100000
10464
10465-#define BIOS_BEGIN 0x000a0000
10466+#define BIOS_BEGIN 0x000c0000
10467 #define BIOS_END 0x00100000
10468
10469 #define BIOS_ROM_BASE 0xffe00000
10470diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10471index 5939f44..f8845f6 100644
10472--- a/arch/x86/include/asm/elf.h
10473+++ b/arch/x86/include/asm/elf.h
10474@@ -243,7 +243,25 @@ extern int force_personality32;
10475 the loader. We need to make sure that it is out of the way of the program
10476 that it will "exec", and that there is sufficient room for the brk. */
10477
10478+#ifdef CONFIG_PAX_SEGMEXEC
10479+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10480+#else
10481 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10482+#endif
10483+
10484+#ifdef CONFIG_PAX_ASLR
10485+#ifdef CONFIG_X86_32
10486+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10487+
10488+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10489+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10490+#else
10491+#define PAX_ELF_ET_DYN_BASE 0x400000UL
10492+
10493+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10494+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10495+#endif
10496+#endif
10497
10498 /* This yields a mask that user programs can use to figure out what
10499 instruction set this CPU supports. This could be done in user space,
10500@@ -296,16 +314,12 @@ do { \
10501
10502 #define ARCH_DLINFO \
10503 do { \
10504- if (vdso_enabled) \
10505- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10506- (unsigned long)current->mm->context.vdso); \
10507+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10508 } while (0)
10509
10510 #define ARCH_DLINFO_X32 \
10511 do { \
10512- if (vdso_enabled) \
10513- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10514- (unsigned long)current->mm->context.vdso); \
10515+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10516 } while (0)
10517
10518 #define AT_SYSINFO 32
10519@@ -320,7 +334,7 @@ else \
10520
10521 #endif /* !CONFIG_X86_32 */
10522
10523-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10524+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10525
10526 #define VDSO_ENTRY \
10527 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10528@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
10529 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10530 #define compat_arch_setup_additional_pages syscall32_setup_pages
10531
10532-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10533-#define arch_randomize_brk arch_randomize_brk
10534-
10535 /*
10536 * True on X86_32 or when emulating IA32 on X86_64
10537 */
10538diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10539index cc70c1c..d96d011 100644
10540--- a/arch/x86/include/asm/emergency-restart.h
10541+++ b/arch/x86/include/asm/emergency-restart.h
10542@@ -15,6 +15,6 @@ enum reboot_type {
10543
10544 extern enum reboot_type reboot_type;
10545
10546-extern void machine_emergency_restart(void);
10547+extern void machine_emergency_restart(void) __noreturn;
10548
10549 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10550diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
10551index 75f4c6d..ee3eb8f 100644
10552--- a/arch/x86/include/asm/fpu-internal.h
10553+++ b/arch/x86/include/asm/fpu-internal.h
10554@@ -86,6 +86,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10555 {
10556 int err;
10557
10558+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10559+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10560+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10561+#endif
10562+
10563 /* See comment in fxsave() below. */
10564 #ifdef CONFIG_AS_FXSAVEQ
10565 asm volatile("1: fxrstorq %[fx]\n\t"
10566@@ -115,6 +120,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10567 {
10568 int err;
10569
10570+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10571+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10572+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10573+#endif
10574+
10575 /*
10576 * Clear the bytes not touched by the fxsave and reserved
10577 * for the SW usage.
10578@@ -271,7 +281,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10579 "emms\n\t" /* clear stack tags */
10580 "fildl %P[addr]", /* set F?P to defined value */
10581 X86_FEATURE_FXSAVE_LEAK,
10582- [addr] "m" (tsk->thread.fpu.has_fpu));
10583+ [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10584
10585 return fpu_restore_checking(&tsk->thread.fpu);
10586 }
10587diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10588index 71ecbcb..bac10b7 100644
10589--- a/arch/x86/include/asm/futex.h
10590+++ b/arch/x86/include/asm/futex.h
10591@@ -11,16 +11,18 @@
10592 #include <asm/processor.h>
10593
10594 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10595+ typecheck(u32 __user *, uaddr); \
10596 asm volatile("1:\t" insn "\n" \
10597 "2:\t.section .fixup,\"ax\"\n" \
10598 "3:\tmov\t%3, %1\n" \
10599 "\tjmp\t2b\n" \
10600 "\t.previous\n" \
10601 _ASM_EXTABLE(1b, 3b) \
10602- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10603+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10604 : "i" (-EFAULT), "0" (oparg), "1" (0))
10605
10606 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10607+ typecheck(u32 __user *, uaddr); \
10608 asm volatile("1:\tmovl %2, %0\n" \
10609 "\tmovl\t%0, %3\n" \
10610 "\t" insn "\n" \
10611@@ -33,7 +35,7 @@
10612 _ASM_EXTABLE(1b, 4b) \
10613 _ASM_EXTABLE(2b, 4b) \
10614 : "=&a" (oldval), "=&r" (ret), \
10615- "+m" (*uaddr), "=&r" (tem) \
10616+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10617 : "r" (oparg), "i" (-EFAULT), "1" (0))
10618
10619 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10620@@ -60,10 +62,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10621
10622 switch (op) {
10623 case FUTEX_OP_SET:
10624- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10625+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10626 break;
10627 case FUTEX_OP_ADD:
10628- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10629+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10630 uaddr, oparg);
10631 break;
10632 case FUTEX_OP_OR:
10633@@ -122,13 +124,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10634 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10635 return -EFAULT;
10636
10637- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10638+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10639 "2:\t.section .fixup, \"ax\"\n"
10640 "3:\tmov %3, %0\n"
10641 "\tjmp 2b\n"
10642 "\t.previous\n"
10643 _ASM_EXTABLE(1b, 3b)
10644- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10645+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10646 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10647 : "memory"
10648 );
10649diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10650index eb92a6e..b98b2f4 100644
10651--- a/arch/x86/include/asm/hw_irq.h
10652+++ b/arch/x86/include/asm/hw_irq.h
10653@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10654 extern void enable_IO_APIC(void);
10655
10656 /* Statistics */
10657-extern atomic_t irq_err_count;
10658-extern atomic_t irq_mis_count;
10659+extern atomic_unchecked_t irq_err_count;
10660+extern atomic_unchecked_t irq_mis_count;
10661
10662 /* EISA */
10663 extern void eisa_set_level_irq(unsigned int irq);
10664diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10665index d8e8eef..99f81ae 100644
10666--- a/arch/x86/include/asm/io.h
10667+++ b/arch/x86/include/asm/io.h
10668@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10669
10670 #include <linux/vmalloc.h>
10671
10672+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10673+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10674+{
10675+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10676+}
10677+
10678+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10679+{
10680+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10681+}
10682+
10683 /*
10684 * Convert a virtual cached pointer to an uncached pointer
10685 */
10686diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10687index bba3cf8..06bc8da 100644
10688--- a/arch/x86/include/asm/irqflags.h
10689+++ b/arch/x86/include/asm/irqflags.h
10690@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10691 sti; \
10692 sysexit
10693
10694+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10695+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10696+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10697+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10698+
10699 #else
10700 #define INTERRUPT_RETURN iret
10701 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10702diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10703index 5478825..839e88c 100644
10704--- a/arch/x86/include/asm/kprobes.h
10705+++ b/arch/x86/include/asm/kprobes.h
10706@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10707 #define RELATIVEJUMP_SIZE 5
10708 #define RELATIVECALL_OPCODE 0xe8
10709 #define RELATIVE_ADDR_SIZE 4
10710-#define MAX_STACK_SIZE 64
10711-#define MIN_STACK_SIZE(ADDR) \
10712- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10713- THREAD_SIZE - (unsigned long)(ADDR))) \
10714- ? (MAX_STACK_SIZE) \
10715- : (((unsigned long)current_thread_info()) + \
10716- THREAD_SIZE - (unsigned long)(ADDR)))
10717+#define MAX_STACK_SIZE 64UL
10718+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10719
10720 #define flush_insn_slot(p) do { } while (0)
10721
10722diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10723index db7c1f2..92f130a 100644
10724--- a/arch/x86/include/asm/kvm_host.h
10725+++ b/arch/x86/include/asm/kvm_host.h
10726@@ -680,7 +680,7 @@ struct kvm_x86_ops {
10727 int (*check_intercept)(struct kvm_vcpu *vcpu,
10728 struct x86_instruction_info *info,
10729 enum x86_intercept_stage stage);
10730-};
10731+} __do_const;
10732
10733 struct kvm_arch_async_pf {
10734 u32 token;
10735diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10736index c8bed0d..e5721fa 100644
10737--- a/arch/x86/include/asm/local.h
10738+++ b/arch/x86/include/asm/local.h
10739@@ -17,26 +17,58 @@ typedef struct {
10740
10741 static inline void local_inc(local_t *l)
10742 {
10743- asm volatile(_ASM_INC "%0"
10744+ asm volatile(_ASM_INC "%0\n"
10745+
10746+#ifdef CONFIG_PAX_REFCOUNT
10747+ "jno 0f\n"
10748+ _ASM_DEC "%0\n"
10749+ "int $4\n0:\n"
10750+ _ASM_EXTABLE(0b, 0b)
10751+#endif
10752+
10753 : "+m" (l->a.counter));
10754 }
10755
10756 static inline void local_dec(local_t *l)
10757 {
10758- asm volatile(_ASM_DEC "%0"
10759+ asm volatile(_ASM_DEC "%0\n"
10760+
10761+#ifdef CONFIG_PAX_REFCOUNT
10762+ "jno 0f\n"
10763+ _ASM_INC "%0\n"
10764+ "int $4\n0:\n"
10765+ _ASM_EXTABLE(0b, 0b)
10766+#endif
10767+
10768 : "+m" (l->a.counter));
10769 }
10770
10771 static inline void local_add(long i, local_t *l)
10772 {
10773- asm volatile(_ASM_ADD "%1,%0"
10774+ asm volatile(_ASM_ADD "%1,%0\n"
10775+
10776+#ifdef CONFIG_PAX_REFCOUNT
10777+ "jno 0f\n"
10778+ _ASM_SUB "%1,%0\n"
10779+ "int $4\n0:\n"
10780+ _ASM_EXTABLE(0b, 0b)
10781+#endif
10782+
10783 : "+m" (l->a.counter)
10784 : "ir" (i));
10785 }
10786
10787 static inline void local_sub(long i, local_t *l)
10788 {
10789- asm volatile(_ASM_SUB "%1,%0"
10790+ asm volatile(_ASM_SUB "%1,%0\n"
10791+
10792+#ifdef CONFIG_PAX_REFCOUNT
10793+ "jno 0f\n"
10794+ _ASM_ADD "%1,%0\n"
10795+ "int $4\n0:\n"
10796+ _ASM_EXTABLE(0b, 0b)
10797+#endif
10798+
10799 : "+m" (l->a.counter)
10800 : "ir" (i));
10801 }
10802@@ -54,7 +86,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10803 {
10804 unsigned char c;
10805
10806- asm volatile(_ASM_SUB "%2,%0; sete %1"
10807+ asm volatile(_ASM_SUB "%2,%0\n"
10808+
10809+#ifdef CONFIG_PAX_REFCOUNT
10810+ "jno 0f\n"
10811+ _ASM_ADD "%2,%0\n"
10812+ "int $4\n0:\n"
10813+ _ASM_EXTABLE(0b, 0b)
10814+#endif
10815+
10816+ "sete %1\n"
10817 : "+m" (l->a.counter), "=qm" (c)
10818 : "ir" (i) : "memory");
10819 return c;
10820@@ -72,7 +113,16 @@ static inline int local_dec_and_test(local_t *l)
10821 {
10822 unsigned char c;
10823
10824- asm volatile(_ASM_DEC "%0; sete %1"
10825+ asm volatile(_ASM_DEC "%0\n"
10826+
10827+#ifdef CONFIG_PAX_REFCOUNT
10828+ "jno 0f\n"
10829+ _ASM_INC "%0\n"
10830+ "int $4\n0:\n"
10831+ _ASM_EXTABLE(0b, 0b)
10832+#endif
10833+
10834+ "sete %1\n"
10835 : "+m" (l->a.counter), "=qm" (c)
10836 : : "memory");
10837 return c != 0;
10838@@ -90,7 +140,16 @@ static inline int local_inc_and_test(local_t *l)
10839 {
10840 unsigned char c;
10841
10842- asm volatile(_ASM_INC "%0; sete %1"
10843+ asm volatile(_ASM_INC "%0\n"
10844+
10845+#ifdef CONFIG_PAX_REFCOUNT
10846+ "jno 0f\n"
10847+ _ASM_DEC "%0\n"
10848+ "int $4\n0:\n"
10849+ _ASM_EXTABLE(0b, 0b)
10850+#endif
10851+
10852+ "sete %1\n"
10853 : "+m" (l->a.counter), "=qm" (c)
10854 : : "memory");
10855 return c != 0;
10856@@ -109,7 +168,16 @@ static inline int local_add_negative(long i, local_t *l)
10857 {
10858 unsigned char c;
10859
10860- asm volatile(_ASM_ADD "%2,%0; sets %1"
10861+ asm volatile(_ASM_ADD "%2,%0\n"
10862+
10863+#ifdef CONFIG_PAX_REFCOUNT
10864+ "jno 0f\n"
10865+ _ASM_SUB "%2,%0\n"
10866+ "int $4\n0:\n"
10867+ _ASM_EXTABLE(0b, 0b)
10868+#endif
10869+
10870+ "sets %1\n"
10871 : "+m" (l->a.counter), "=qm" (c)
10872 : "ir" (i) : "memory");
10873 return c;
10874@@ -132,7 +200,15 @@ static inline long local_add_return(long i, local_t *l)
10875 #endif
10876 /* Modern 486+ processor */
10877 __i = i;
10878- asm volatile(_ASM_XADD "%0, %1;"
10879+ asm volatile(_ASM_XADD "%0, %1\n"
10880+
10881+#ifdef CONFIG_PAX_REFCOUNT
10882+ "jno 0f\n"
10883+ _ASM_MOV "%0,%1\n"
10884+ "int $4\n0:\n"
10885+ _ASM_EXTABLE(0b, 0b)
10886+#endif
10887+
10888 : "+r" (i), "+m" (l->a.counter)
10889 : : "memory");
10890 return i + __i;
10891diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10892index 593e51d..fa69c9a 100644
10893--- a/arch/x86/include/asm/mman.h
10894+++ b/arch/x86/include/asm/mman.h
10895@@ -5,4 +5,14 @@
10896
10897 #include <asm-generic/mman.h>
10898
10899+#ifdef __KERNEL__
10900+#ifndef __ASSEMBLY__
10901+#ifdef CONFIG_X86_32
10902+#define arch_mmap_check i386_mmap_check
10903+int i386_mmap_check(unsigned long addr, unsigned long len,
10904+ unsigned long flags);
10905+#endif
10906+#endif
10907+#endif
10908+
10909 #endif /* _ASM_X86_MMAN_H */
10910diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10911index 5f55e69..e20bfb1 100644
10912--- a/arch/x86/include/asm/mmu.h
10913+++ b/arch/x86/include/asm/mmu.h
10914@@ -9,7 +9,7 @@
10915 * we put the segment information here.
10916 */
10917 typedef struct {
10918- void *ldt;
10919+ struct desc_struct *ldt;
10920 int size;
10921
10922 #ifdef CONFIG_X86_64
10923@@ -18,7 +18,19 @@ typedef struct {
10924 #endif
10925
10926 struct mutex lock;
10927- void *vdso;
10928+ unsigned long vdso;
10929+
10930+#ifdef CONFIG_X86_32
10931+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10932+ unsigned long user_cs_base;
10933+ unsigned long user_cs_limit;
10934+
10935+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10936+ cpumask_t cpu_user_cs_mask;
10937+#endif
10938+
10939+#endif
10940+#endif
10941 } mm_context_t;
10942
10943 #ifdef CONFIG_SMP
10944diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10945index cdbf367..adb37ac 100644
10946--- a/arch/x86/include/asm/mmu_context.h
10947+++ b/arch/x86/include/asm/mmu_context.h
10948@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10949
10950 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10951 {
10952+
10953+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10954+ unsigned int i;
10955+ pgd_t *pgd;
10956+
10957+ pax_open_kernel();
10958+ pgd = get_cpu_pgd(smp_processor_id());
10959+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10960+ set_pgd_batched(pgd+i, native_make_pgd(0));
10961+ pax_close_kernel();
10962+#endif
10963+
10964 #ifdef CONFIG_SMP
10965 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10966 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10967@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10968 struct task_struct *tsk)
10969 {
10970 unsigned cpu = smp_processor_id();
10971+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10972+ int tlbstate = TLBSTATE_OK;
10973+#endif
10974
10975 if (likely(prev != next)) {
10976 #ifdef CONFIG_SMP
10977+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10978+ tlbstate = this_cpu_read(cpu_tlbstate.state);
10979+#endif
10980 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10981 this_cpu_write(cpu_tlbstate.active_mm, next);
10982 #endif
10983 cpumask_set_cpu(cpu, mm_cpumask(next));
10984
10985 /* Re-load page tables */
10986+#ifdef CONFIG_PAX_PER_CPU_PGD
10987+ pax_open_kernel();
10988+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
10989+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
10990+ pax_close_kernel();
10991+ load_cr3(get_cpu_pgd(cpu));
10992+#else
10993 load_cr3(next->pgd);
10994+#endif
10995
10996 /* stop flush ipis for the previous mm */
10997 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10998@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10999 */
11000 if (unlikely(prev->context.ldt != next->context.ldt))
11001 load_LDT_nolock(&next->context);
11002- }
11003+
11004+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11005+ if (!(__supported_pte_mask & _PAGE_NX)) {
11006+ smp_mb__before_clear_bit();
11007+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11008+ smp_mb__after_clear_bit();
11009+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11010+ }
11011+#endif
11012+
11013+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11014+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11015+ prev->context.user_cs_limit != next->context.user_cs_limit))
11016+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11017 #ifdef CONFIG_SMP
11018+ else if (unlikely(tlbstate != TLBSTATE_OK))
11019+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11020+#endif
11021+#endif
11022+
11023+ }
11024 else {
11025+
11026+#ifdef CONFIG_PAX_PER_CPU_PGD
11027+ pax_open_kernel();
11028+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
11029+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
11030+ pax_close_kernel();
11031+ load_cr3(get_cpu_pgd(cpu));
11032+#endif
11033+
11034+#ifdef CONFIG_SMP
11035 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11036 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
11037
11038@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11039 * tlb flush IPI delivery. We must reload CR3
11040 * to make sure to use no freed page tables.
11041 */
11042+
11043+#ifndef CONFIG_PAX_PER_CPU_PGD
11044 load_cr3(next->pgd);
11045+#endif
11046+
11047 load_LDT_nolock(&next->context);
11048+
11049+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11050+ if (!(__supported_pte_mask & _PAGE_NX))
11051+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11052+#endif
11053+
11054+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11055+#ifdef CONFIG_PAX_PAGEEXEC
11056+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
11057+#endif
11058+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11059+#endif
11060+
11061 }
11062+#endif
11063 }
11064-#endif
11065 }
11066
11067 #define activate_mm(prev, next) \
11068diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11069index 9eae775..c914fea 100644
11070--- a/arch/x86/include/asm/module.h
11071+++ b/arch/x86/include/asm/module.h
11072@@ -5,6 +5,7 @@
11073
11074 #ifdef CONFIG_X86_64
11075 /* X86_64 does not define MODULE_PROC_FAMILY */
11076+#define MODULE_PROC_FAMILY ""
11077 #elif defined CONFIG_M386
11078 #define MODULE_PROC_FAMILY "386 "
11079 #elif defined CONFIG_M486
11080@@ -59,8 +60,20 @@
11081 #error unknown processor family
11082 #endif
11083
11084-#ifdef CONFIG_X86_32
11085-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
11086+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11087+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11088+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11089+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11090+#else
11091+#define MODULE_PAX_KERNEXEC ""
11092 #endif
11093
11094+#ifdef CONFIG_PAX_MEMORY_UDEREF
11095+#define MODULE_PAX_UDEREF "UDEREF "
11096+#else
11097+#define MODULE_PAX_UDEREF ""
11098+#endif
11099+
11100+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11101+
11102 #endif /* _ASM_X86_MODULE_H */
11103diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11104index 320f7bb..e89f8f8 100644
11105--- a/arch/x86/include/asm/page_64_types.h
11106+++ b/arch/x86/include/asm/page_64_types.h
11107@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11108
11109 /* duplicated to the one in bootmem.h */
11110 extern unsigned long max_pfn;
11111-extern unsigned long phys_base;
11112+extern const unsigned long phys_base;
11113
11114 extern unsigned long __phys_addr(unsigned long);
11115 #define __phys_reloc_hide(x) (x)
11116diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11117index 6cbbabf..11b3aed 100644
11118--- a/arch/x86/include/asm/paravirt.h
11119+++ b/arch/x86/include/asm/paravirt.h
11120@@ -668,6 +668,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11121 val);
11122 }
11123
11124+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11125+{
11126+ pgdval_t val = native_pgd_val(pgd);
11127+
11128+ if (sizeof(pgdval_t) > sizeof(long))
11129+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11130+ val, (u64)val >> 32);
11131+ else
11132+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11133+ val);
11134+}
11135+
11136 static inline void pgd_clear(pgd_t *pgdp)
11137 {
11138 set_pgd(pgdp, __pgd(0));
11139@@ -749,6 +761,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11140 pv_mmu_ops.set_fixmap(idx, phys, flags);
11141 }
11142
11143+#ifdef CONFIG_PAX_KERNEXEC
11144+static inline unsigned long pax_open_kernel(void)
11145+{
11146+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11147+}
11148+
11149+static inline unsigned long pax_close_kernel(void)
11150+{
11151+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11152+}
11153+#else
11154+static inline unsigned long pax_open_kernel(void) { return 0; }
11155+static inline unsigned long pax_close_kernel(void) { return 0; }
11156+#endif
11157+
11158 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11159
11160 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
11161@@ -965,7 +992,7 @@ extern void default_banner(void);
11162
11163 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11164 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11165-#define PARA_INDIRECT(addr) *%cs:addr
11166+#define PARA_INDIRECT(addr) *%ss:addr
11167 #endif
11168
11169 #define INTERRUPT_RETURN \
11170@@ -1040,6 +1067,21 @@ extern void default_banner(void);
11171 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11172 CLBR_NONE, \
11173 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11174+
11175+#define GET_CR0_INTO_RDI \
11176+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11177+ mov %rax,%rdi
11178+
11179+#define SET_RDI_INTO_CR0 \
11180+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11181+
11182+#define GET_CR3_INTO_RDI \
11183+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11184+ mov %rax,%rdi
11185+
11186+#define SET_RDI_INTO_CR3 \
11187+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11188+
11189 #endif /* CONFIG_X86_32 */
11190
11191 #endif /* __ASSEMBLY__ */
11192diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11193index 8e8b9a4..f07d725 100644
11194--- a/arch/x86/include/asm/paravirt_types.h
11195+++ b/arch/x86/include/asm/paravirt_types.h
11196@@ -84,20 +84,20 @@ struct pv_init_ops {
11197 */
11198 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11199 unsigned long addr, unsigned len);
11200-};
11201+} __no_const;
11202
11203
11204 struct pv_lazy_ops {
11205 /* Set deferred update mode, used for batching operations. */
11206 void (*enter)(void);
11207 void (*leave)(void);
11208-};
11209+} __no_const;
11210
11211 struct pv_time_ops {
11212 unsigned long long (*sched_clock)(void);
11213 unsigned long long (*steal_clock)(int cpu);
11214 unsigned long (*get_tsc_khz)(void);
11215-};
11216+} __no_const;
11217
11218 struct pv_cpu_ops {
11219 /* hooks for various privileged instructions */
11220@@ -193,7 +193,7 @@ struct pv_cpu_ops {
11221
11222 void (*start_context_switch)(struct task_struct *prev);
11223 void (*end_context_switch)(struct task_struct *next);
11224-};
11225+} __no_const;
11226
11227 struct pv_irq_ops {
11228 /*
11229@@ -224,7 +224,7 @@ struct pv_apic_ops {
11230 unsigned long start_eip,
11231 unsigned long start_esp);
11232 #endif
11233-};
11234+} __no_const;
11235
11236 struct pv_mmu_ops {
11237 unsigned long (*read_cr2)(void);
11238@@ -313,6 +313,7 @@ struct pv_mmu_ops {
11239 struct paravirt_callee_save make_pud;
11240
11241 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11242+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11243 #endif /* PAGETABLE_LEVELS == 4 */
11244 #endif /* PAGETABLE_LEVELS >= 3 */
11245
11246@@ -324,6 +325,12 @@ struct pv_mmu_ops {
11247 an mfn. We can tell which is which from the index. */
11248 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11249 phys_addr_t phys, pgprot_t flags);
11250+
11251+#ifdef CONFIG_PAX_KERNEXEC
11252+ unsigned long (*pax_open_kernel)(void);
11253+ unsigned long (*pax_close_kernel)(void);
11254+#endif
11255+
11256 };
11257
11258 struct arch_spinlock;
11259@@ -334,7 +341,7 @@ struct pv_lock_ops {
11260 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11261 int (*spin_trylock)(struct arch_spinlock *lock);
11262 void (*spin_unlock)(struct arch_spinlock *lock);
11263-};
11264+} __no_const;
11265
11266 /* This contains all the paravirt structures: we get a convenient
11267 * number for each function using the offset which we use to indicate
11268diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11269index b4389a4..7024269 100644
11270--- a/arch/x86/include/asm/pgalloc.h
11271+++ b/arch/x86/include/asm/pgalloc.h
11272@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11273 pmd_t *pmd, pte_t *pte)
11274 {
11275 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11276+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11277+}
11278+
11279+static inline void pmd_populate_user(struct mm_struct *mm,
11280+ pmd_t *pmd, pte_t *pte)
11281+{
11282+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11283 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11284 }
11285
11286@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11287
11288 #ifdef CONFIG_X86_PAE
11289 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11290+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11291+{
11292+ pud_populate(mm, pudp, pmd);
11293+}
11294 #else /* !CONFIG_X86_PAE */
11295 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11296 {
11297 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11298 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11299 }
11300+
11301+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11302+{
11303+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11304+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11305+}
11306 #endif /* CONFIG_X86_PAE */
11307
11308 #if PAGETABLE_LEVELS > 3
11309@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11310 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11311 }
11312
11313+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11314+{
11315+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11316+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11317+}
11318+
11319 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11320 {
11321 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
11322diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11323index 98391db..8f6984e 100644
11324--- a/arch/x86/include/asm/pgtable-2level.h
11325+++ b/arch/x86/include/asm/pgtable-2level.h
11326@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11327
11328 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11329 {
11330+ pax_open_kernel();
11331 *pmdp = pmd;
11332+ pax_close_kernel();
11333 }
11334
11335 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11336diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11337index cb00ccc..17e9054 100644
11338--- a/arch/x86/include/asm/pgtable-3level.h
11339+++ b/arch/x86/include/asm/pgtable-3level.h
11340@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11341
11342 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11343 {
11344+ pax_open_kernel();
11345 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11346+ pax_close_kernel();
11347 }
11348
11349 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11350 {
11351+ pax_open_kernel();
11352 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11353+ pax_close_kernel();
11354 }
11355
11356 /*
11357diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11358index 49afb3f..91a8c63 100644
11359--- a/arch/x86/include/asm/pgtable.h
11360+++ b/arch/x86/include/asm/pgtable.h
11361@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11362
11363 #ifndef __PAGETABLE_PUD_FOLDED
11364 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11365+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11366 #define pgd_clear(pgd) native_pgd_clear(pgd)
11367 #endif
11368
11369@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11370
11371 #define arch_end_context_switch(prev) do {} while(0)
11372
11373+#define pax_open_kernel() native_pax_open_kernel()
11374+#define pax_close_kernel() native_pax_close_kernel()
11375 #endif /* CONFIG_PARAVIRT */
11376
11377+#define __HAVE_ARCH_PAX_OPEN_KERNEL
11378+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11379+
11380+#ifdef CONFIG_PAX_KERNEXEC
11381+static inline unsigned long native_pax_open_kernel(void)
11382+{
11383+ unsigned long cr0;
11384+
11385+ preempt_disable();
11386+ barrier();
11387+ cr0 = read_cr0() ^ X86_CR0_WP;
11388+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
11389+ write_cr0(cr0);
11390+ return cr0 ^ X86_CR0_WP;
11391+}
11392+
11393+static inline unsigned long native_pax_close_kernel(void)
11394+{
11395+ unsigned long cr0;
11396+
11397+ cr0 = read_cr0() ^ X86_CR0_WP;
11398+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11399+ write_cr0(cr0);
11400+ barrier();
11401+ preempt_enable_no_resched();
11402+ return cr0 ^ X86_CR0_WP;
11403+}
11404+#else
11405+static inline unsigned long native_pax_open_kernel(void) { return 0; }
11406+static inline unsigned long native_pax_close_kernel(void) { return 0; }
11407+#endif
11408+
11409 /*
11410 * The following only work if pte_present() is true.
11411 * Undefined behaviour if not..
11412 */
11413+static inline int pte_user(pte_t pte)
11414+{
11415+ return pte_val(pte) & _PAGE_USER;
11416+}
11417+
11418 static inline int pte_dirty(pte_t pte)
11419 {
11420 return pte_flags(pte) & _PAGE_DIRTY;
11421@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11422 return pte_clear_flags(pte, _PAGE_RW);
11423 }
11424
11425+static inline pte_t pte_mkread(pte_t pte)
11426+{
11427+ return __pte(pte_val(pte) | _PAGE_USER);
11428+}
11429+
11430 static inline pte_t pte_mkexec(pte_t pte)
11431 {
11432- return pte_clear_flags(pte, _PAGE_NX);
11433+#ifdef CONFIG_X86_PAE
11434+ if (__supported_pte_mask & _PAGE_NX)
11435+ return pte_clear_flags(pte, _PAGE_NX);
11436+ else
11437+#endif
11438+ return pte_set_flags(pte, _PAGE_USER);
11439+}
11440+
11441+static inline pte_t pte_exprotect(pte_t pte)
11442+{
11443+#ifdef CONFIG_X86_PAE
11444+ if (__supported_pte_mask & _PAGE_NX)
11445+ return pte_set_flags(pte, _PAGE_NX);
11446+ else
11447+#endif
11448+ return pte_clear_flags(pte, _PAGE_USER);
11449 }
11450
11451 static inline pte_t pte_mkdirty(pte_t pte)
11452@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11453 #endif
11454
11455 #ifndef __ASSEMBLY__
11456+
11457+#ifdef CONFIG_PAX_PER_CPU_PGD
11458+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11459+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11460+{
11461+ return cpu_pgd[cpu];
11462+}
11463+#endif
11464+
11465 #include <linux/mm_types.h>
11466
11467 static inline int pte_none(pte_t pte)
11468@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11469
11470 static inline int pgd_bad(pgd_t pgd)
11471 {
11472- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11473+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11474 }
11475
11476 static inline int pgd_none(pgd_t pgd)
11477@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
11478 * pgd_offset() returns a (pgd_t *)
11479 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11480 */
11481-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11482+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11483+
11484+#ifdef CONFIG_PAX_PER_CPU_PGD
11485+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11486+#endif
11487+
11488 /*
11489 * a shortcut which implies the use of the kernel's pgd, instead
11490 * of a process's
11491@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
11492 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11493 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11494
11495+#ifdef CONFIG_X86_32
11496+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11497+#else
11498+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11499+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11500+
11501+#ifdef CONFIG_PAX_MEMORY_UDEREF
11502+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11503+#else
11504+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11505+#endif
11506+
11507+#endif
11508+
11509 #ifndef __ASSEMBLY__
11510
11511 extern int direct_gbpages;
11512@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
11513 * dst and src can be on the same page, but the range must not overlap,
11514 * and must not cross a page boundary.
11515 */
11516-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11517+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11518 {
11519- memcpy(dst, src, count * sizeof(pgd_t));
11520+ pax_open_kernel();
11521+ while (count--)
11522+ *dst++ = *src++;
11523+ pax_close_kernel();
11524 }
11525
11526+#ifdef CONFIG_PAX_PER_CPU_PGD
11527+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
11528+#endif
11529+
11530+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11531+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
11532+#else
11533+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
11534+#endif
11535
11536 #include <asm-generic/pgtable.h>
11537 #endif /* __ASSEMBLY__ */
11538diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11539index 0c92113..34a77c6 100644
11540--- a/arch/x86/include/asm/pgtable_32.h
11541+++ b/arch/x86/include/asm/pgtable_32.h
11542@@ -25,9 +25,6 @@
11543 struct mm_struct;
11544 struct vm_area_struct;
11545
11546-extern pgd_t swapper_pg_dir[1024];
11547-extern pgd_t initial_page_table[1024];
11548-
11549 static inline void pgtable_cache_init(void) { }
11550 static inline void check_pgt_cache(void) { }
11551 void paging_init(void);
11552@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11553 # include <asm/pgtable-2level.h>
11554 #endif
11555
11556+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11557+extern pgd_t initial_page_table[PTRS_PER_PGD];
11558+#ifdef CONFIG_X86_PAE
11559+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11560+#endif
11561+
11562 #if defined(CONFIG_HIGHPTE)
11563 #define pte_offset_map(dir, address) \
11564 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11565@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11566 /* Clear a kernel PTE and flush it from the TLB */
11567 #define kpte_clear_flush(ptep, vaddr) \
11568 do { \
11569+ pax_open_kernel(); \
11570 pte_clear(&init_mm, (vaddr), (ptep)); \
11571+ pax_close_kernel(); \
11572 __flush_tlb_one((vaddr)); \
11573 } while (0)
11574
11575@@ -74,6 +79,9 @@ do { \
11576
11577 #endif /* !__ASSEMBLY__ */
11578
11579+#define HAVE_ARCH_UNMAPPED_AREA
11580+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11581+
11582 /*
11583 * kern_addr_valid() is (1) for FLATMEM and (0) for
11584 * SPARSEMEM and DISCONTIGMEM
11585diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11586index ed5903b..c7fe163 100644
11587--- a/arch/x86/include/asm/pgtable_32_types.h
11588+++ b/arch/x86/include/asm/pgtable_32_types.h
11589@@ -8,7 +8,7 @@
11590 */
11591 #ifdef CONFIG_X86_PAE
11592 # include <asm/pgtable-3level_types.h>
11593-# define PMD_SIZE (1UL << PMD_SHIFT)
11594+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11595 # define PMD_MASK (~(PMD_SIZE - 1))
11596 #else
11597 # include <asm/pgtable-2level_types.h>
11598@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11599 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11600 #endif
11601
11602+#ifdef CONFIG_PAX_KERNEXEC
11603+#ifndef __ASSEMBLY__
11604+extern unsigned char MODULES_EXEC_VADDR[];
11605+extern unsigned char MODULES_EXEC_END[];
11606+#endif
11607+#include <asm/boot.h>
11608+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11609+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11610+#else
11611+#define ktla_ktva(addr) (addr)
11612+#define ktva_ktla(addr) (addr)
11613+#endif
11614+
11615 #define MODULES_VADDR VMALLOC_START
11616 #define MODULES_END VMALLOC_END
11617 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11618diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11619index 975f709..9f779c9 100644
11620--- a/arch/x86/include/asm/pgtable_64.h
11621+++ b/arch/x86/include/asm/pgtable_64.h
11622@@ -16,10 +16,14 @@
11623
11624 extern pud_t level3_kernel_pgt[512];
11625 extern pud_t level3_ident_pgt[512];
11626+extern pud_t level3_vmalloc_start_pgt[512];
11627+extern pud_t level3_vmalloc_end_pgt[512];
11628+extern pud_t level3_vmemmap_pgt[512];
11629+extern pud_t level2_vmemmap_pgt[512];
11630 extern pmd_t level2_kernel_pgt[512];
11631 extern pmd_t level2_fixmap_pgt[512];
11632-extern pmd_t level2_ident_pgt[512];
11633-extern pgd_t init_level4_pgt[];
11634+extern pmd_t level2_ident_pgt[512*2];
11635+extern pgd_t init_level4_pgt[512];
11636
11637 #define swapper_pg_dir init_level4_pgt
11638
11639@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11640
11641 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11642 {
11643+ pax_open_kernel();
11644 *pmdp = pmd;
11645+ pax_close_kernel();
11646 }
11647
11648 static inline void native_pmd_clear(pmd_t *pmd)
11649@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11650
11651 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11652 {
11653+ pax_open_kernel();
11654 *pudp = pud;
11655+ pax_close_kernel();
11656 }
11657
11658 static inline void native_pud_clear(pud_t *pud)
11659@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
11660
11661 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11662 {
11663+ pax_open_kernel();
11664+ *pgdp = pgd;
11665+ pax_close_kernel();
11666+}
11667+
11668+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11669+{
11670 *pgdp = pgd;
11671 }
11672
11673diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11674index 766ea16..5b96cb3 100644
11675--- a/arch/x86/include/asm/pgtable_64_types.h
11676+++ b/arch/x86/include/asm/pgtable_64_types.h
11677@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11678 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11679 #define MODULES_END _AC(0xffffffffff000000, UL)
11680 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11681+#define MODULES_EXEC_VADDR MODULES_VADDR
11682+#define MODULES_EXEC_END MODULES_END
11683+
11684+#define ktla_ktva(addr) (addr)
11685+#define ktva_ktla(addr) (addr)
11686
11687 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11688diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11689index 013286a..8b42f4f 100644
11690--- a/arch/x86/include/asm/pgtable_types.h
11691+++ b/arch/x86/include/asm/pgtable_types.h
11692@@ -16,13 +16,12 @@
11693 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11694 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11695 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11696-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11697+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11698 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11699 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11700 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11701-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11702-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11703-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11704+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11705+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11706 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11707
11708 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11709@@ -40,7 +39,6 @@
11710 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11711 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11712 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11713-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11714 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11715 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11716 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11717@@ -57,8 +55,10 @@
11718
11719 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11720 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11721-#else
11722+#elif defined(CONFIG_KMEMCHECK)
11723 #define _PAGE_NX (_AT(pteval_t, 0))
11724+#else
11725+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11726 #endif
11727
11728 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11729@@ -96,6 +96,9 @@
11730 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11731 _PAGE_ACCESSED)
11732
11733+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11734+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11735+
11736 #define __PAGE_KERNEL_EXEC \
11737 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11738 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11739@@ -106,7 +109,7 @@
11740 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11741 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11742 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11743-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11744+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11745 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11746 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11747 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11748@@ -168,8 +171,8 @@
11749 * bits are combined, this will alow user to access the high address mapped
11750 * VDSO in the presence of CONFIG_COMPAT_VDSO
11751 */
11752-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11753-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11754+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11755+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11756 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11757 #endif
11758
11759@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11760 {
11761 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11762 }
11763+#endif
11764
11765+#if PAGETABLE_LEVELS == 3
11766+#include <asm-generic/pgtable-nopud.h>
11767+#endif
11768+
11769+#if PAGETABLE_LEVELS == 2
11770+#include <asm-generic/pgtable-nopmd.h>
11771+#endif
11772+
11773+#ifndef __ASSEMBLY__
11774 #if PAGETABLE_LEVELS > 3
11775 typedef struct { pudval_t pud; } pud_t;
11776
11777@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11778 return pud.pud;
11779 }
11780 #else
11781-#include <asm-generic/pgtable-nopud.h>
11782-
11783 static inline pudval_t native_pud_val(pud_t pud)
11784 {
11785 return native_pgd_val(pud.pgd);
11786@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11787 return pmd.pmd;
11788 }
11789 #else
11790-#include <asm-generic/pgtable-nopmd.h>
11791-
11792 static inline pmdval_t native_pmd_val(pmd_t pmd)
11793 {
11794 return native_pgd_val(pmd.pud.pgd);
11795@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11796
11797 extern pteval_t __supported_pte_mask;
11798 extern void set_nx(void);
11799-extern int nx_enabled;
11800
11801 #define pgprot_writecombine pgprot_writecombine
11802 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11803diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
11804index f8ab3ea..67889db 100644
11805--- a/arch/x86/include/asm/processor-flags.h
11806+++ b/arch/x86/include/asm/processor-flags.h
11807@@ -63,6 +63,7 @@
11808 #define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
11809 #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
11810 #define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
11811+#define X86_CR4_SMAP 0x00200000 /* enable SMAP support */
11812
11813 /*
11814 * x86-64 Task Priority Register, CR8
11815diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11816index 39bc577..538233f 100644
11817--- a/arch/x86/include/asm/processor.h
11818+++ b/arch/x86/include/asm/processor.h
11819@@ -276,7 +276,7 @@ struct tss_struct {
11820
11821 } ____cacheline_aligned;
11822
11823-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11824+extern struct tss_struct init_tss[NR_CPUS];
11825
11826 /*
11827 * Save the original ist values for checking stack pointers during debugging
11828@@ -809,11 +809,18 @@ static inline void spin_lock_prefetch(const void *x)
11829 */
11830 #define TASK_SIZE PAGE_OFFSET
11831 #define TASK_SIZE_MAX TASK_SIZE
11832+
11833+#ifdef CONFIG_PAX_SEGMEXEC
11834+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11835+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11836+#else
11837 #define STACK_TOP TASK_SIZE
11838-#define STACK_TOP_MAX STACK_TOP
11839+#endif
11840+
11841+#define STACK_TOP_MAX TASK_SIZE
11842
11843 #define INIT_THREAD { \
11844- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11845+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11846 .vm86_info = NULL, \
11847 .sysenter_cs = __KERNEL_CS, \
11848 .io_bitmap_ptr = NULL, \
11849@@ -827,7 +834,7 @@ static inline void spin_lock_prefetch(const void *x)
11850 */
11851 #define INIT_TSS { \
11852 .x86_tss = { \
11853- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11854+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11855 .ss0 = __KERNEL_DS, \
11856 .ss1 = __KERNEL_CS, \
11857 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11858@@ -838,11 +845,7 @@ static inline void spin_lock_prefetch(const void *x)
11859 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11860
11861 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11862-#define KSTK_TOP(info) \
11863-({ \
11864- unsigned long *__ptr = (unsigned long *)(info); \
11865- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11866-})
11867+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11868
11869 /*
11870 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11871@@ -857,7 +860,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11872 #define task_pt_regs(task) \
11873 ({ \
11874 struct pt_regs *__regs__; \
11875- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11876+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11877 __regs__ - 1; \
11878 })
11879
11880@@ -867,13 +870,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11881 /*
11882 * User space process size. 47bits minus one guard page.
11883 */
11884-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11885+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11886
11887 /* This decides where the kernel will search for a free chunk of vm
11888 * space during mmap's.
11889 */
11890 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11891- 0xc0000000 : 0xFFFFe000)
11892+ 0xc0000000 : 0xFFFFf000)
11893
11894 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
11895 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11896@@ -884,11 +887,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11897 #define STACK_TOP_MAX TASK_SIZE_MAX
11898
11899 #define INIT_THREAD { \
11900- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11901+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11902 }
11903
11904 #define INIT_TSS { \
11905- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11906+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11907 }
11908
11909 /*
11910@@ -916,6 +919,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11911 */
11912 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11913
11914+#ifdef CONFIG_PAX_SEGMEXEC
11915+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11916+#endif
11917+
11918 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11919
11920 /* Get/set a process' ability to use the timestamp counter instruction */
11921@@ -976,12 +983,12 @@ extern bool cpu_has_amd_erratum(const int *);
11922 #define cpu_has_amd_erratum(x) (false)
11923 #endif /* CONFIG_CPU_SUP_AMD */
11924
11925-extern unsigned long arch_align_stack(unsigned long sp);
11926+#define arch_align_stack(x) ((x) & ~0xfUL)
11927 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11928
11929 void default_idle(void);
11930 bool set_pm_idle_to_default(void);
11931
11932-void stop_this_cpu(void *dummy);
11933+void stop_this_cpu(void *dummy) __noreturn;
11934
11935 #endif /* _ASM_X86_PROCESSOR_H */
11936diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11937index dcfde52..dbfea06 100644
11938--- a/arch/x86/include/asm/ptrace.h
11939+++ b/arch/x86/include/asm/ptrace.h
11940@@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11941 }
11942
11943 /*
11944- * user_mode_vm(regs) determines whether a register set came from user mode.
11945+ * user_mode(regs) determines whether a register set came from user mode.
11946 * This is true if V8086 mode was enabled OR if the register set was from
11947 * protected mode with RPL-3 CS value. This tricky test checks that with
11948 * one comparison. Many places in the kernel can bypass this full check
11949- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11950+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11951+ * be used.
11952 */
11953-static inline int user_mode(struct pt_regs *regs)
11954+static inline int user_mode_novm(struct pt_regs *regs)
11955 {
11956 #ifdef CONFIG_X86_32
11957 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11958 #else
11959- return !!(regs->cs & 3);
11960+ return !!(regs->cs & SEGMENT_RPL_MASK);
11961 #endif
11962 }
11963
11964-static inline int user_mode_vm(struct pt_regs *regs)
11965+static inline int user_mode(struct pt_regs *regs)
11966 {
11967 #ifdef CONFIG_X86_32
11968 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11969 USER_RPL;
11970 #else
11971- return user_mode(regs);
11972+ return user_mode_novm(regs);
11973 #endif
11974 }
11975
11976@@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11977 #ifdef CONFIG_X86_64
11978 static inline bool user_64bit_mode(struct pt_regs *regs)
11979 {
11980+ unsigned long cs = regs->cs & 0xffff;
11981 #ifndef CONFIG_PARAVIRT
11982 /*
11983 * On non-paravirt systems, this is the only long mode CPL 3
11984 * selector. We do not allow long mode selectors in the LDT.
11985 */
11986- return regs->cs == __USER_CS;
11987+ return cs == __USER_CS;
11988 #else
11989 /* Headers are too twisted for this to go in paravirt.h. */
11990- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11991+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11992 #endif
11993 }
11994 #endif
11995diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
11996index fce3f4a..3f69f2a 100644
11997--- a/arch/x86/include/asm/realmode.h
11998+++ b/arch/x86/include/asm/realmode.h
11999@@ -30,7 +30,7 @@ struct real_mode_header {
12000 struct trampoline_header {
12001 #ifdef CONFIG_X86_32
12002 u32 start;
12003- u16 gdt_pad;
12004+ u16 boot_cs;
12005 u16 gdt_limit;
12006 u32 gdt_base;
12007 #else
12008diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12009index 92f29706..d0a1a53 100644
12010--- a/arch/x86/include/asm/reboot.h
12011+++ b/arch/x86/include/asm/reboot.h
12012@@ -6,19 +6,19 @@
12013 struct pt_regs;
12014
12015 struct machine_ops {
12016- void (*restart)(char *cmd);
12017- void (*halt)(void);
12018- void (*power_off)(void);
12019+ void (* __noreturn restart)(char *cmd);
12020+ void (* __noreturn halt)(void);
12021+ void (* __noreturn power_off)(void);
12022 void (*shutdown)(void);
12023 void (*crash_shutdown)(struct pt_regs *);
12024- void (*emergency_restart)(void);
12025-};
12026+ void (* __noreturn emergency_restart)(void);
12027+} __no_const;
12028
12029 extern struct machine_ops machine_ops;
12030
12031 void native_machine_crash_shutdown(struct pt_regs *regs);
12032 void native_machine_shutdown(void);
12033-void machine_real_restart(unsigned int type);
12034+void __noreturn machine_real_restart(unsigned int type);
12035 /* These must match dispatch_table in reboot_32.S */
12036 #define MRR_BIOS 0
12037 #define MRR_APM 1
12038diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12039index 2dbe4a7..ce1db00 100644
12040--- a/arch/x86/include/asm/rwsem.h
12041+++ b/arch/x86/include/asm/rwsem.h
12042@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12043 {
12044 asm volatile("# beginning down_read\n\t"
12045 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12046+
12047+#ifdef CONFIG_PAX_REFCOUNT
12048+ "jno 0f\n"
12049+ LOCK_PREFIX _ASM_DEC "(%1)\n"
12050+ "int $4\n0:\n"
12051+ _ASM_EXTABLE(0b, 0b)
12052+#endif
12053+
12054 /* adds 0x00000001 */
12055 " jns 1f\n"
12056 " call call_rwsem_down_read_failed\n"
12057@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12058 "1:\n\t"
12059 " mov %1,%2\n\t"
12060 " add %3,%2\n\t"
12061+
12062+#ifdef CONFIG_PAX_REFCOUNT
12063+ "jno 0f\n"
12064+ "sub %3,%2\n"
12065+ "int $4\n0:\n"
12066+ _ASM_EXTABLE(0b, 0b)
12067+#endif
12068+
12069 " jle 2f\n\t"
12070 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12071 " jnz 1b\n\t"
12072@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12073 long tmp;
12074 asm volatile("# beginning down_write\n\t"
12075 LOCK_PREFIX " xadd %1,(%2)\n\t"
12076+
12077+#ifdef CONFIG_PAX_REFCOUNT
12078+ "jno 0f\n"
12079+ "mov %1,(%2)\n"
12080+ "int $4\n0:\n"
12081+ _ASM_EXTABLE(0b, 0b)
12082+#endif
12083+
12084 /* adds 0xffff0001, returns the old value */
12085 " test %1,%1\n\t"
12086 /* was the count 0 before? */
12087@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12088 long tmp;
12089 asm volatile("# beginning __up_read\n\t"
12090 LOCK_PREFIX " xadd %1,(%2)\n\t"
12091+
12092+#ifdef CONFIG_PAX_REFCOUNT
12093+ "jno 0f\n"
12094+ "mov %1,(%2)\n"
12095+ "int $4\n0:\n"
12096+ _ASM_EXTABLE(0b, 0b)
12097+#endif
12098+
12099 /* subtracts 1, returns the old value */
12100 " jns 1f\n\t"
12101 " call call_rwsem_wake\n" /* expects old value in %edx */
12102@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12103 long tmp;
12104 asm volatile("# beginning __up_write\n\t"
12105 LOCK_PREFIX " xadd %1,(%2)\n\t"
12106+
12107+#ifdef CONFIG_PAX_REFCOUNT
12108+ "jno 0f\n"
12109+ "mov %1,(%2)\n"
12110+ "int $4\n0:\n"
12111+ _ASM_EXTABLE(0b, 0b)
12112+#endif
12113+
12114 /* subtracts 0xffff0001, returns the old value */
12115 " jns 1f\n\t"
12116 " call call_rwsem_wake\n" /* expects old value in %edx */
12117@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12118 {
12119 asm volatile("# beginning __downgrade_write\n\t"
12120 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12121+
12122+#ifdef CONFIG_PAX_REFCOUNT
12123+ "jno 0f\n"
12124+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12125+ "int $4\n0:\n"
12126+ _ASM_EXTABLE(0b, 0b)
12127+#endif
12128+
12129 /*
12130 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12131 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12132@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12133 */
12134 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12135 {
12136- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12137+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12138+
12139+#ifdef CONFIG_PAX_REFCOUNT
12140+ "jno 0f\n"
12141+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
12142+ "int $4\n0:\n"
12143+ _ASM_EXTABLE(0b, 0b)
12144+#endif
12145+
12146 : "+m" (sem->count)
12147 : "er" (delta));
12148 }
12149@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12150 */
12151 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
12152 {
12153- return delta + xadd(&sem->count, delta);
12154+ return delta + xadd_check_overflow(&sem->count, delta);
12155 }
12156
12157 #endif /* __KERNEL__ */
12158diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12159index c48a950..c6d7468 100644
12160--- a/arch/x86/include/asm/segment.h
12161+++ b/arch/x86/include/asm/segment.h
12162@@ -64,10 +64,15 @@
12163 * 26 - ESPFIX small SS
12164 * 27 - per-cpu [ offset to per-cpu data area ]
12165 * 28 - stack_canary-20 [ for stack protector ]
12166- * 29 - unused
12167- * 30 - unused
12168+ * 29 - PCI BIOS CS
12169+ * 30 - PCI BIOS DS
12170 * 31 - TSS for double fault handler
12171 */
12172+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12173+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12174+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12175+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12176+
12177 #define GDT_ENTRY_TLS_MIN 6
12178 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12179
12180@@ -79,6 +84,8 @@
12181
12182 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
12183
12184+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12185+
12186 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
12187
12188 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
12189@@ -104,6 +111,12 @@
12190 #define __KERNEL_STACK_CANARY 0
12191 #endif
12192
12193+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
12194+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12195+
12196+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
12197+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12198+
12199 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12200
12201 /*
12202@@ -141,7 +154,7 @@
12203 */
12204
12205 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12206-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12207+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12208
12209
12210 #else
12211@@ -165,6 +178,8 @@
12212 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
12213 #define __USER32_DS __USER_DS
12214
12215+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12216+
12217 #define GDT_ENTRY_TSS 8 /* needs two entries */
12218 #define GDT_ENTRY_LDT 10 /* needs two entries */
12219 #define GDT_ENTRY_TLS_MIN 12
12220@@ -185,6 +200,7 @@
12221 #endif
12222
12223 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12224+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12225 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12226 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12227 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
12228@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
12229 {
12230 unsigned long __limit;
12231 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12232- return __limit + 1;
12233+ return __limit;
12234 }
12235
12236 #endif /* !__ASSEMBLY__ */
12237diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12238index f483945..64a7851 100644
12239--- a/arch/x86/include/asm/smp.h
12240+++ b/arch/x86/include/asm/smp.h
12241@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12242 /* cpus sharing the last level cache: */
12243 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
12244 DECLARE_PER_CPU(u16, cpu_llc_id);
12245-DECLARE_PER_CPU(int, cpu_number);
12246+DECLARE_PER_CPU(unsigned int, cpu_number);
12247
12248 static inline struct cpumask *cpu_sibling_mask(int cpu)
12249 {
12250@@ -79,7 +79,7 @@ struct smp_ops {
12251
12252 void (*send_call_func_ipi)(const struct cpumask *mask);
12253 void (*send_call_func_single_ipi)(int cpu);
12254-};
12255+} __no_const;
12256
12257 /* Globals due to paravirt */
12258 extern void set_cpu_sibling_map(int cpu);
12259@@ -195,14 +195,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12260 extern int safe_smp_processor_id(void);
12261
12262 #elif defined(CONFIG_X86_64_SMP)
12263-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
12264-
12265-#define stack_smp_processor_id() \
12266-({ \
12267- struct thread_info *ti; \
12268- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12269- ti->cpu; \
12270-})
12271+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
12272+#define stack_smp_processor_id() raw_smp_processor_id()
12273 #define safe_smp_processor_id() smp_processor_id()
12274
12275 #endif
12276diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12277index b315a33..8849ab0 100644
12278--- a/arch/x86/include/asm/spinlock.h
12279+++ b/arch/x86/include/asm/spinlock.h
12280@@ -173,6 +173,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
12281 static inline void arch_read_lock(arch_rwlock_t *rw)
12282 {
12283 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
12284+
12285+#ifdef CONFIG_PAX_REFCOUNT
12286+ "jno 0f\n"
12287+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
12288+ "int $4\n0:\n"
12289+ _ASM_EXTABLE(0b, 0b)
12290+#endif
12291+
12292 "jns 1f\n"
12293 "call __read_lock_failed\n\t"
12294 "1:\n"
12295@@ -182,6 +190,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
12296 static inline void arch_write_lock(arch_rwlock_t *rw)
12297 {
12298 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
12299+
12300+#ifdef CONFIG_PAX_REFCOUNT
12301+ "jno 0f\n"
12302+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
12303+ "int $4\n0:\n"
12304+ _ASM_EXTABLE(0b, 0b)
12305+#endif
12306+
12307 "jz 1f\n"
12308 "call __write_lock_failed\n\t"
12309 "1:\n"
12310@@ -211,13 +227,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
12311
12312 static inline void arch_read_unlock(arch_rwlock_t *rw)
12313 {
12314- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12315+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
12316+
12317+#ifdef CONFIG_PAX_REFCOUNT
12318+ "jno 0f\n"
12319+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
12320+ "int $4\n0:\n"
12321+ _ASM_EXTABLE(0b, 0b)
12322+#endif
12323+
12324 :"+m" (rw->lock) : : "memory");
12325 }
12326
12327 static inline void arch_write_unlock(arch_rwlock_t *rw)
12328 {
12329- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12330+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
12331+
12332+#ifdef CONFIG_PAX_REFCOUNT
12333+ "jno 0f\n"
12334+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
12335+ "int $4\n0:\n"
12336+ _ASM_EXTABLE(0b, 0b)
12337+#endif
12338+
12339 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
12340 }
12341
12342diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12343index 6a99859..03cb807 100644
12344--- a/arch/x86/include/asm/stackprotector.h
12345+++ b/arch/x86/include/asm/stackprotector.h
12346@@ -47,7 +47,7 @@
12347 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12348 */
12349 #define GDT_STACK_CANARY_INIT \
12350- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12351+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12352
12353 /*
12354 * Initialize the stackprotector canary value.
12355@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
12356
12357 static inline void load_stack_canary_segment(void)
12358 {
12359-#ifdef CONFIG_X86_32
12360+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12361 asm volatile ("mov %0, %%gs" : : "r" (0));
12362 #endif
12363 }
12364diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12365index 70bbe39..4ae2bd4 100644
12366--- a/arch/x86/include/asm/stacktrace.h
12367+++ b/arch/x86/include/asm/stacktrace.h
12368@@ -11,28 +11,20 @@
12369
12370 extern int kstack_depth_to_print;
12371
12372-struct thread_info;
12373+struct task_struct;
12374 struct stacktrace_ops;
12375
12376-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12377- unsigned long *stack,
12378- unsigned long bp,
12379- const struct stacktrace_ops *ops,
12380- void *data,
12381- unsigned long *end,
12382- int *graph);
12383+typedef unsigned long walk_stack_t(struct task_struct *task,
12384+ void *stack_start,
12385+ unsigned long *stack,
12386+ unsigned long bp,
12387+ const struct stacktrace_ops *ops,
12388+ void *data,
12389+ unsigned long *end,
12390+ int *graph);
12391
12392-extern unsigned long
12393-print_context_stack(struct thread_info *tinfo,
12394- unsigned long *stack, unsigned long bp,
12395- const struct stacktrace_ops *ops, void *data,
12396- unsigned long *end, int *graph);
12397-
12398-extern unsigned long
12399-print_context_stack_bp(struct thread_info *tinfo,
12400- unsigned long *stack, unsigned long bp,
12401- const struct stacktrace_ops *ops, void *data,
12402- unsigned long *end, int *graph);
12403+extern walk_stack_t print_context_stack;
12404+extern walk_stack_t print_context_stack_bp;
12405
12406 /* Generic stack tracer with callbacks */
12407
12408@@ -40,7 +32,7 @@ struct stacktrace_ops {
12409 void (*address)(void *data, unsigned long address, int reliable);
12410 /* On negative return stop dumping */
12411 int (*stack)(void *data, char *name);
12412- walk_stack_t walk_stack;
12413+ walk_stack_t *walk_stack;
12414 };
12415
12416 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
12417diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
12418index 4ec45b3..a4f0a8a 100644
12419--- a/arch/x86/include/asm/switch_to.h
12420+++ b/arch/x86/include/asm/switch_to.h
12421@@ -108,7 +108,7 @@ do { \
12422 "call __switch_to\n\t" \
12423 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12424 __switch_canary \
12425- "movq %P[thread_info](%%rsi),%%r8\n\t" \
12426+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12427 "movq %%rax,%%rdi\n\t" \
12428 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12429 "jnz ret_from_fork\n\t" \
12430@@ -119,7 +119,7 @@ do { \
12431 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12432 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12433 [_tif_fork] "i" (_TIF_FORK), \
12434- [thread_info] "i" (offsetof(struct task_struct, stack)), \
12435+ [thread_info] "m" (current_tinfo), \
12436 [current_task] "m" (current_task) \
12437 __switch_canary_iparam \
12438 : "memory", "cc" __EXTRA_CLOBBER)
12439diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12440index 3fda9db4..4ca1c61 100644
12441--- a/arch/x86/include/asm/sys_ia32.h
12442+++ b/arch/x86/include/asm/sys_ia32.h
12443@@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
12444 struct old_sigaction32 __user *);
12445 asmlinkage long sys32_alarm(unsigned int);
12446
12447-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12448+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12449 asmlinkage long sys32_sysfs(int, u32, u32);
12450
12451 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
12452diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12453index 89f794f..1422765 100644
12454--- a/arch/x86/include/asm/thread_info.h
12455+++ b/arch/x86/include/asm/thread_info.h
12456@@ -10,6 +10,7 @@
12457 #include <linux/compiler.h>
12458 #include <asm/page.h>
12459 #include <asm/types.h>
12460+#include <asm/percpu.h>
12461
12462 /*
12463 * low level task data that entry.S needs immediate access to
12464@@ -24,7 +25,6 @@ struct exec_domain;
12465 #include <linux/atomic.h>
12466
12467 struct thread_info {
12468- struct task_struct *task; /* main task structure */
12469 struct exec_domain *exec_domain; /* execution domain */
12470 __u32 flags; /* low level flags */
12471 __u32 status; /* thread synchronous flags */
12472@@ -34,19 +34,13 @@ struct thread_info {
12473 mm_segment_t addr_limit;
12474 struct restart_block restart_block;
12475 void __user *sysenter_return;
12476-#ifdef CONFIG_X86_32
12477- unsigned long previous_esp; /* ESP of the previous stack in
12478- case of nested (IRQ) stacks
12479- */
12480- __u8 supervisor_stack[0];
12481-#endif
12482+ unsigned long lowest_stack;
12483 unsigned int sig_on_uaccess_error:1;
12484 unsigned int uaccess_err:1; /* uaccess failed */
12485 };
12486
12487-#define INIT_THREAD_INFO(tsk) \
12488+#define INIT_THREAD_INFO \
12489 { \
12490- .task = &tsk, \
12491 .exec_domain = &default_exec_domain, \
12492 .flags = 0, \
12493 .cpu = 0, \
12494@@ -57,7 +51,7 @@ struct thread_info {
12495 }, \
12496 }
12497
12498-#define init_thread_info (init_thread_union.thread_info)
12499+#define init_thread_info (init_thread_union.stack)
12500 #define init_stack (init_thread_union.stack)
12501
12502 #else /* !__ASSEMBLY__ */
12503@@ -98,6 +92,7 @@ struct thread_info {
12504 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12505 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
12506 #define TIF_X32 30 /* 32-bit native x86-64 binary */
12507+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
12508
12509 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12510 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12511@@ -122,16 +117,18 @@ struct thread_info {
12512 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12513 #define _TIF_ADDR32 (1 << TIF_ADDR32)
12514 #define _TIF_X32 (1 << TIF_X32)
12515+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12516
12517 /* work to do in syscall_trace_enter() */
12518 #define _TIF_WORK_SYSCALL_ENTRY \
12519 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12520- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12521+ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
12522+ _TIF_GRSEC_SETXID)
12523
12524 /* work to do in syscall_trace_leave() */
12525 #define _TIF_WORK_SYSCALL_EXIT \
12526 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12527- _TIF_SYSCALL_TRACEPOINT)
12528+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12529
12530 /* work to do on interrupt/exception return */
12531 #define _TIF_WORK_MASK \
12532@@ -141,7 +138,8 @@ struct thread_info {
12533
12534 /* work to do on any return to user space */
12535 #define _TIF_ALLWORK_MASK \
12536- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12537+ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12538+ _TIF_GRSEC_SETXID)
12539
12540 /* Only used for 64 bit */
12541 #define _TIF_DO_NOTIFY_MASK \
12542@@ -157,45 +155,40 @@ struct thread_info {
12543
12544 #define PREEMPT_ACTIVE 0x10000000
12545
12546-#ifdef CONFIG_X86_32
12547-
12548-#define STACK_WARN (THREAD_SIZE/8)
12549-/*
12550- * macros/functions for gaining access to the thread information structure
12551- *
12552- * preempt_count needs to be 1 initially, until the scheduler is functional.
12553- */
12554-#ifndef __ASSEMBLY__
12555-
12556-
12557-/* how to get the current stack pointer from C */
12558-register unsigned long current_stack_pointer asm("esp") __used;
12559-
12560-/* how to get the thread information struct from C */
12561-static inline struct thread_info *current_thread_info(void)
12562-{
12563- return (struct thread_info *)
12564- (current_stack_pointer & ~(THREAD_SIZE - 1));
12565-}
12566-
12567-#else /* !__ASSEMBLY__ */
12568-
12569+#ifdef __ASSEMBLY__
12570 /* how to get the thread information struct from ASM */
12571 #define GET_THREAD_INFO(reg) \
12572- movl $-THREAD_SIZE, reg; \
12573- andl %esp, reg
12574+ mov PER_CPU_VAR(current_tinfo), reg
12575
12576 /* use this one if reg already contains %esp */
12577-#define GET_THREAD_INFO_WITH_ESP(reg) \
12578- andl $-THREAD_SIZE, reg
12579+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12580+#else
12581+/* how to get the thread information struct from C */
12582+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12583+
12584+static __always_inline struct thread_info *current_thread_info(void)
12585+{
12586+ return this_cpu_read_stable(current_tinfo);
12587+}
12588+#endif
12589+
12590+#ifdef CONFIG_X86_32
12591+
12592+#define STACK_WARN (THREAD_SIZE/8)
12593+/*
12594+ * macros/functions for gaining access to the thread information structure
12595+ *
12596+ * preempt_count needs to be 1 initially, until the scheduler is functional.
12597+ */
12598+#ifndef __ASSEMBLY__
12599+
12600+/* how to get the current stack pointer from C */
12601+register unsigned long current_stack_pointer asm("esp") __used;
12602
12603 #endif
12604
12605 #else /* X86_32 */
12606
12607-#include <asm/percpu.h>
12608-#define KERNEL_STACK_OFFSET (5*8)
12609-
12610 /*
12611 * macros/functions for gaining access to the thread information structure
12612 * preempt_count needs to be 1 initially, until the scheduler is functional.
12613@@ -203,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
12614 #ifndef __ASSEMBLY__
12615 DECLARE_PER_CPU(unsigned long, kernel_stack);
12616
12617-static inline struct thread_info *current_thread_info(void)
12618-{
12619- struct thread_info *ti;
12620- ti = (void *)(this_cpu_read_stable(kernel_stack) +
12621- KERNEL_STACK_OFFSET - THREAD_SIZE);
12622- return ti;
12623-}
12624-
12625-#else /* !__ASSEMBLY__ */
12626-
12627-/* how to get the thread information struct from ASM */
12628-#define GET_THREAD_INFO(reg) \
12629- movq PER_CPU_VAR(kernel_stack),reg ; \
12630- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12631-
12632-/*
12633- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12634- * a certain register (to be used in assembler memory operands).
12635- */
12636-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12637-
12638+/* how to get the current stack pointer from C */
12639+register unsigned long current_stack_pointer asm("rsp") __used;
12640 #endif
12641
12642 #endif /* !X86_32 */
12643@@ -284,5 +258,12 @@ static inline bool is_ia32_task(void)
12644 extern void arch_task_cache_init(void);
12645 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12646 extern void arch_release_task_struct(struct task_struct *tsk);
12647+
12648+#define __HAVE_THREAD_FUNCTIONS
12649+#define task_thread_info(task) (&(task)->tinfo)
12650+#define task_stack_page(task) ((task)->stack)
12651+#define setup_thread_stack(p, org) do {} while (0)
12652+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12653+
12654 #endif
12655 #endif /* _ASM_X86_THREAD_INFO_H */
12656diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12657index e1f3a17..1ab364d 100644
12658--- a/arch/x86/include/asm/uaccess.h
12659+++ b/arch/x86/include/asm/uaccess.h
12660@@ -7,12 +7,15 @@
12661 #include <linux/compiler.h>
12662 #include <linux/thread_info.h>
12663 #include <linux/string.h>
12664+#include <linux/sched.h>
12665 #include <asm/asm.h>
12666 #include <asm/page.h>
12667
12668 #define VERIFY_READ 0
12669 #define VERIFY_WRITE 1
12670
12671+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12672+
12673 /*
12674 * The fs value determines whether argument validity checking should be
12675 * performed or not. If get_fs() == USER_DS, checking is performed, with
12676@@ -28,7 +31,12 @@
12677
12678 #define get_ds() (KERNEL_DS)
12679 #define get_fs() (current_thread_info()->addr_limit)
12680+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12681+void __set_fs(mm_segment_t x);
12682+void set_fs(mm_segment_t x);
12683+#else
12684 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12685+#endif
12686
12687 #define segment_eq(a, b) ((a).seg == (b).seg)
12688
12689@@ -76,8 +84,33 @@
12690 * checks that the pointer is in the user space range - after calling
12691 * this function, memory access functions may still return -EFAULT.
12692 */
12693-#define access_ok(type, addr, size) \
12694- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
12695+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
12696+#define access_ok(type, addr, size) \
12697+({ \
12698+ long __size = size; \
12699+ unsigned long __addr = (unsigned long)addr; \
12700+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12701+ unsigned long __end_ao = __addr + __size - 1; \
12702+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
12703+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12704+ while(__addr_ao <= __end_ao) { \
12705+ char __c_ao; \
12706+ __addr_ao += PAGE_SIZE; \
12707+ if (__size > PAGE_SIZE) \
12708+ cond_resched(); \
12709+ if (__get_user(__c_ao, (char __user *)__addr)) \
12710+ break; \
12711+ if (type != VERIFY_WRITE) { \
12712+ __addr = __addr_ao; \
12713+ continue; \
12714+ } \
12715+ if (__put_user(__c_ao, (char __user *)__addr)) \
12716+ break; \
12717+ __addr = __addr_ao; \
12718+ } \
12719+ } \
12720+ __ret_ao; \
12721+})
12722
12723 /*
12724 * The exception table consists of pairs of addresses relative to the
12725@@ -188,12 +221,20 @@ extern int __get_user_bad(void);
12726 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12727 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12728
12729-
12730+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12731+#define __copyuser_seg "gs;"
12732+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12733+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12734+#else
12735+#define __copyuser_seg
12736+#define __COPYUSER_SET_ES
12737+#define __COPYUSER_RESTORE_ES
12738+#endif
12739
12740 #ifdef CONFIG_X86_32
12741 #define __put_user_asm_u64(x, addr, err, errret) \
12742- asm volatile("1: movl %%eax,0(%2)\n" \
12743- "2: movl %%edx,4(%2)\n" \
12744+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12745+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12746 "3:\n" \
12747 ".section .fixup,\"ax\"\n" \
12748 "4: movl %3,%0\n" \
12749@@ -205,8 +246,8 @@ extern int __get_user_bad(void);
12750 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12751
12752 #define __put_user_asm_ex_u64(x, addr) \
12753- asm volatile("1: movl %%eax,0(%1)\n" \
12754- "2: movl %%edx,4(%1)\n" \
12755+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12756+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12757 "3:\n" \
12758 _ASM_EXTABLE_EX(1b, 2b) \
12759 _ASM_EXTABLE_EX(2b, 3b) \
12760@@ -258,7 +299,7 @@ extern void __put_user_8(void);
12761 __typeof__(*(ptr)) __pu_val; \
12762 __chk_user_ptr(ptr); \
12763 might_fault(); \
12764- __pu_val = x; \
12765+ __pu_val = (x); \
12766 switch (sizeof(*(ptr))) { \
12767 case 1: \
12768 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12769@@ -379,7 +420,7 @@ do { \
12770 } while (0)
12771
12772 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12773- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12774+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12775 "2:\n" \
12776 ".section .fixup,\"ax\"\n" \
12777 "3: mov %3,%0\n" \
12778@@ -387,7 +428,7 @@ do { \
12779 " jmp 2b\n" \
12780 ".previous\n" \
12781 _ASM_EXTABLE(1b, 3b) \
12782- : "=r" (err), ltype(x) \
12783+ : "=r" (err), ltype (x) \
12784 : "m" (__m(addr)), "i" (errret), "0" (err))
12785
12786 #define __get_user_size_ex(x, ptr, size) \
12787@@ -412,7 +453,7 @@ do { \
12788 } while (0)
12789
12790 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12791- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12792+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12793 "2:\n" \
12794 _ASM_EXTABLE_EX(1b, 2b) \
12795 : ltype(x) : "m" (__m(addr)))
12796@@ -429,13 +470,24 @@ do { \
12797 int __gu_err; \
12798 unsigned long __gu_val; \
12799 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12800- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12801+ (x) = (__typeof__(*(ptr)))__gu_val; \
12802 __gu_err; \
12803 })
12804
12805 /* FIXME: this hack is definitely wrong -AK */
12806 struct __large_struct { unsigned long buf[100]; };
12807-#define __m(x) (*(struct __large_struct __user *)(x))
12808+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12809+#define ____m(x) \
12810+({ \
12811+ unsigned long ____x = (unsigned long)(x); \
12812+ if (____x < PAX_USER_SHADOW_BASE) \
12813+ ____x += PAX_USER_SHADOW_BASE; \
12814+ (void __user *)____x; \
12815+})
12816+#else
12817+#define ____m(x) (x)
12818+#endif
12819+#define __m(x) (*(struct __large_struct __user *)____m(x))
12820
12821 /*
12822 * Tell gcc we read from memory instead of writing: this is because
12823@@ -443,7 +495,7 @@ struct __large_struct { unsigned long buf[100]; };
12824 * aliasing issues.
12825 */
12826 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12827- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12828+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12829 "2:\n" \
12830 ".section .fixup,\"ax\"\n" \
12831 "3: mov %3,%0\n" \
12832@@ -451,10 +503,10 @@ struct __large_struct { unsigned long buf[100]; };
12833 ".previous\n" \
12834 _ASM_EXTABLE(1b, 3b) \
12835 : "=r"(err) \
12836- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12837+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12838
12839 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12840- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12841+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12842 "2:\n" \
12843 _ASM_EXTABLE_EX(1b, 2b) \
12844 : : ltype(x), "m" (__m(addr)))
12845@@ -493,8 +545,12 @@ struct __large_struct { unsigned long buf[100]; };
12846 * On error, the variable @x is set to zero.
12847 */
12848
12849+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12850+#define __get_user(x, ptr) get_user((x), (ptr))
12851+#else
12852 #define __get_user(x, ptr) \
12853 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12854+#endif
12855
12856 /**
12857 * __put_user: - Write a simple value into user space, with less checking.
12858@@ -516,8 +572,12 @@ struct __large_struct { unsigned long buf[100]; };
12859 * Returns zero on success, or -EFAULT on error.
12860 */
12861
12862+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12863+#define __put_user(x, ptr) put_user((x), (ptr))
12864+#else
12865 #define __put_user(x, ptr) \
12866 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12867+#endif
12868
12869 #define __get_user_unaligned __get_user
12870 #define __put_user_unaligned __put_user
12871@@ -535,7 +595,7 @@ struct __large_struct { unsigned long buf[100]; };
12872 #define get_user_ex(x, ptr) do { \
12873 unsigned long __gue_val; \
12874 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12875- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12876+ (x) = (__typeof__(*(ptr)))__gue_val; \
12877 } while (0)
12878
12879 #ifdef CONFIG_X86_WP_WORKS_OK
12880diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12881index 576e39b..ccd0a39 100644
12882--- a/arch/x86/include/asm/uaccess_32.h
12883+++ b/arch/x86/include/asm/uaccess_32.h
12884@@ -11,15 +11,15 @@
12885 #include <asm/page.h>
12886
12887 unsigned long __must_check __copy_to_user_ll
12888- (void __user *to, const void *from, unsigned long n);
12889+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12890 unsigned long __must_check __copy_from_user_ll
12891- (void *to, const void __user *from, unsigned long n);
12892+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12893 unsigned long __must_check __copy_from_user_ll_nozero
12894- (void *to, const void __user *from, unsigned long n);
12895+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12896 unsigned long __must_check __copy_from_user_ll_nocache
12897- (void *to, const void __user *from, unsigned long n);
12898+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12899 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12900- (void *to, const void __user *from, unsigned long n);
12901+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12902
12903 /**
12904 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12905@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12906 static __always_inline unsigned long __must_check
12907 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12908 {
12909+ if ((long)n < 0)
12910+ return n;
12911+
12912 if (__builtin_constant_p(n)) {
12913 unsigned long ret;
12914
12915@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12916 return ret;
12917 }
12918 }
12919+ if (!__builtin_constant_p(n))
12920+ check_object_size(from, n, true);
12921 return __copy_to_user_ll(to, from, n);
12922 }
12923
12924@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12925 __copy_to_user(void __user *to, const void *from, unsigned long n)
12926 {
12927 might_fault();
12928+
12929 return __copy_to_user_inatomic(to, from, n);
12930 }
12931
12932 static __always_inline unsigned long
12933 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12934 {
12935+ if ((long)n < 0)
12936+ return n;
12937+
12938 /* Avoid zeroing the tail if the copy fails..
12939 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12940 * but as the zeroing behaviour is only significant when n is not
12941@@ -137,6 +146,10 @@ static __always_inline unsigned long
12942 __copy_from_user(void *to, const void __user *from, unsigned long n)
12943 {
12944 might_fault();
12945+
12946+ if ((long)n < 0)
12947+ return n;
12948+
12949 if (__builtin_constant_p(n)) {
12950 unsigned long ret;
12951
12952@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12953 return ret;
12954 }
12955 }
12956+ if (!__builtin_constant_p(n))
12957+ check_object_size(to, n, false);
12958 return __copy_from_user_ll(to, from, n);
12959 }
12960
12961@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12962 const void __user *from, unsigned long n)
12963 {
12964 might_fault();
12965+
12966+ if ((long)n < 0)
12967+ return n;
12968+
12969 if (__builtin_constant_p(n)) {
12970 unsigned long ret;
12971
12972@@ -181,15 +200,19 @@ static __always_inline unsigned long
12973 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12974 unsigned long n)
12975 {
12976- return __copy_from_user_ll_nocache_nozero(to, from, n);
12977+ if ((long)n < 0)
12978+ return n;
12979+
12980+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12981 }
12982
12983-unsigned long __must_check copy_to_user(void __user *to,
12984- const void *from, unsigned long n);
12985-unsigned long __must_check _copy_from_user(void *to,
12986- const void __user *from,
12987- unsigned long n);
12988-
12989+extern void copy_to_user_overflow(void)
12990+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12991+ __compiletime_error("copy_to_user() buffer size is not provably correct")
12992+#else
12993+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
12994+#endif
12995+;
12996
12997 extern void copy_from_user_overflow(void)
12998 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12999@@ -199,21 +222,65 @@ extern void copy_from_user_overflow(void)
13000 #endif
13001 ;
13002
13003-static inline unsigned long __must_check copy_from_user(void *to,
13004- const void __user *from,
13005- unsigned long n)
13006+/**
13007+ * copy_to_user: - Copy a block of data into user space.
13008+ * @to: Destination address, in user space.
13009+ * @from: Source address, in kernel space.
13010+ * @n: Number of bytes to copy.
13011+ *
13012+ * Context: User context only. This function may sleep.
13013+ *
13014+ * Copy data from kernel space to user space.
13015+ *
13016+ * Returns number of bytes that could not be copied.
13017+ * On success, this will be zero.
13018+ */
13019+static inline unsigned long __must_check
13020+copy_to_user(void __user *to, const void *from, unsigned long n)
13021 {
13022- int sz = __compiletime_object_size(to);
13023+ size_t sz = __compiletime_object_size(from);
13024
13025- if (likely(sz == -1 || sz >= n))
13026- n = _copy_from_user(to, from, n);
13027- else
13028+ if (unlikely(sz != (size_t)-1 && sz < n))
13029+ copy_to_user_overflow();
13030+ else if (access_ok(VERIFY_WRITE, to, n))
13031+ n = __copy_to_user(to, from, n);
13032+ return n;
13033+}
13034+
13035+/**
13036+ * copy_from_user: - Copy a block of data from user space.
13037+ * @to: Destination address, in kernel space.
13038+ * @from: Source address, in user space.
13039+ * @n: Number of bytes to copy.
13040+ *
13041+ * Context: User context only. This function may sleep.
13042+ *
13043+ * Copy data from user space to kernel space.
13044+ *
13045+ * Returns number of bytes that could not be copied.
13046+ * On success, this will be zero.
13047+ *
13048+ * If some data could not be copied, this function will pad the copied
13049+ * data to the requested size using zero bytes.
13050+ */
13051+static inline unsigned long __must_check
13052+copy_from_user(void *to, const void __user *from, unsigned long n)
13053+{
13054+ size_t sz = __compiletime_object_size(to);
13055+
13056+ if (unlikely(sz != (size_t)-1 && sz < n))
13057 copy_from_user_overflow();
13058-
13059+ else if (access_ok(VERIFY_READ, from, n))
13060+ n = __copy_from_user(to, from, n);
13061+ else if ((long)n > 0) {
13062+ if (!__builtin_constant_p(n))
13063+ check_object_size(to, n, false);
13064+ memset(to, 0, n);
13065+ }
13066 return n;
13067 }
13068
13069-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13070-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13071+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13072+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13073
13074 #endif /* _ASM_X86_UACCESS_32_H */
13075diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13076index 8e796fb..468c55a 100644
13077--- a/arch/x86/include/asm/uaccess_64.h
13078+++ b/arch/x86/include/asm/uaccess_64.h
13079@@ -10,6 +10,9 @@
13080 #include <asm/alternative.h>
13081 #include <asm/cpufeature.h>
13082 #include <asm/page.h>
13083+#include <asm/pgtable.h>
13084+
13085+#define set_fs(x) (current_thread_info()->addr_limit = (x))
13086
13087 /*
13088 * Copy To/From Userspace
13089@@ -17,12 +20,12 @@
13090
13091 /* Handles exceptions in both to and from, but doesn't do access_ok */
13092 __must_check unsigned long
13093-copy_user_generic_string(void *to, const void *from, unsigned len);
13094+copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
13095 __must_check unsigned long
13096-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
13097+copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
13098
13099-static __always_inline __must_check unsigned long
13100-copy_user_generic(void *to, const void *from, unsigned len)
13101+static __always_inline __must_check __size_overflow(3) unsigned long
13102+copy_user_generic(void *to, const void *from, unsigned long len)
13103 {
13104 unsigned ret;
13105
13106@@ -32,142 +35,238 @@ copy_user_generic(void *to, const void *from, unsigned len)
13107 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13108 "=d" (len)),
13109 "1" (to), "2" (from), "3" (len)
13110- : "memory", "rcx", "r8", "r9", "r10", "r11");
13111+ : "memory", "rcx", "r8", "r9", "r11");
13112 return ret;
13113 }
13114
13115+static __always_inline __must_check unsigned long
13116+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13117+static __always_inline __must_check unsigned long
13118+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13119 __must_check unsigned long
13120-_copy_to_user(void __user *to, const void *from, unsigned len);
13121-__must_check unsigned long
13122-_copy_from_user(void *to, const void __user *from, unsigned len);
13123-__must_check unsigned long
13124-copy_in_user(void __user *to, const void __user *from, unsigned len);
13125+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13126+
13127+extern void copy_to_user_overflow(void)
13128+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13129+ __compiletime_error("copy_to_user() buffer size is not provably correct")
13130+#else
13131+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
13132+#endif
13133+;
13134+
13135+extern void copy_from_user_overflow(void)
13136+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13137+ __compiletime_error("copy_from_user() buffer size is not provably correct")
13138+#else
13139+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
13140+#endif
13141+;
13142
13143 static inline unsigned long __must_check copy_from_user(void *to,
13144 const void __user *from,
13145 unsigned long n)
13146 {
13147- int sz = __compiletime_object_size(to);
13148-
13149 might_fault();
13150- if (likely(sz == -1 || sz >= n))
13151- n = _copy_from_user(to, from, n);
13152-#ifdef CONFIG_DEBUG_VM
13153- else
13154- WARN(1, "Buffer overflow detected!\n");
13155-#endif
13156+
13157+ if (access_ok(VERIFY_READ, from, n))
13158+ n = __copy_from_user(to, from, n);
13159+ else if (n < INT_MAX) {
13160+ if (!__builtin_constant_p(n))
13161+ check_object_size(to, n, false);
13162+ memset(to, 0, n);
13163+ }
13164 return n;
13165 }
13166
13167 static __always_inline __must_check
13168-int copy_to_user(void __user *dst, const void *src, unsigned size)
13169+int copy_to_user(void __user *dst, const void *src, unsigned long size)
13170 {
13171 might_fault();
13172
13173- return _copy_to_user(dst, src, size);
13174+ if (access_ok(VERIFY_WRITE, dst, size))
13175+ size = __copy_to_user(dst, src, size);
13176+ return size;
13177 }
13178
13179 static __always_inline __must_check
13180-int __copy_from_user(void *dst, const void __user *src, unsigned size)
13181+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13182 {
13183- int ret = 0;
13184+ size_t sz = __compiletime_object_size(dst);
13185+ unsigned ret = 0;
13186
13187 might_fault();
13188- if (!__builtin_constant_p(size))
13189- return copy_user_generic(dst, (__force void *)src, size);
13190+
13191+ if (size > INT_MAX)
13192+ return size;
13193+
13194+#ifdef CONFIG_PAX_MEMORY_UDEREF
13195+ if (!__access_ok(VERIFY_READ, src, size))
13196+ return size;
13197+#endif
13198+
13199+ if (unlikely(sz != (size_t)-1 && sz < size)) {
13200+ copy_from_user_overflow();
13201+ return size;
13202+ }
13203+
13204+ if (!__builtin_constant_p(size)) {
13205+ check_object_size(dst, size, false);
13206+
13207+#ifdef CONFIG_PAX_MEMORY_UDEREF
13208+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13209+ src += PAX_USER_SHADOW_BASE;
13210+#endif
13211+
13212+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13213+ }
13214 switch (size) {
13215- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13216+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13217 ret, "b", "b", "=q", 1);
13218 return ret;
13219- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13220+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13221 ret, "w", "w", "=r", 2);
13222 return ret;
13223- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13224+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13225 ret, "l", "k", "=r", 4);
13226 return ret;
13227- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13228+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13229 ret, "q", "", "=r", 8);
13230 return ret;
13231 case 10:
13232- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13233+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13234 ret, "q", "", "=r", 10);
13235 if (unlikely(ret))
13236 return ret;
13237 __get_user_asm(*(u16 *)(8 + (char *)dst),
13238- (u16 __user *)(8 + (char __user *)src),
13239+ (const u16 __user *)(8 + (const char __user *)src),
13240 ret, "w", "w", "=r", 2);
13241 return ret;
13242 case 16:
13243- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13244+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13245 ret, "q", "", "=r", 16);
13246 if (unlikely(ret))
13247 return ret;
13248 __get_user_asm(*(u64 *)(8 + (char *)dst),
13249- (u64 __user *)(8 + (char __user *)src),
13250+ (const u64 __user *)(8 + (const char __user *)src),
13251 ret, "q", "", "=r", 8);
13252 return ret;
13253 default:
13254- return copy_user_generic(dst, (__force void *)src, size);
13255+
13256+#ifdef CONFIG_PAX_MEMORY_UDEREF
13257+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13258+ src += PAX_USER_SHADOW_BASE;
13259+#endif
13260+
13261+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13262 }
13263 }
13264
13265 static __always_inline __must_check
13266-int __copy_to_user(void __user *dst, const void *src, unsigned size)
13267+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13268 {
13269- int ret = 0;
13270+ size_t sz = __compiletime_object_size(src);
13271+ unsigned ret = 0;
13272
13273 might_fault();
13274- if (!__builtin_constant_p(size))
13275- return copy_user_generic((__force void *)dst, src, size);
13276+
13277+ if (size > INT_MAX)
13278+ return size;
13279+
13280+#ifdef CONFIG_PAX_MEMORY_UDEREF
13281+ if (!__access_ok(VERIFY_WRITE, dst, size))
13282+ return size;
13283+#endif
13284+
13285+ if (unlikely(sz != (size_t)-1 && sz < size)) {
13286+ copy_to_user_overflow();
13287+ return size;
13288+ }
13289+
13290+ if (!__builtin_constant_p(size)) {
13291+ check_object_size(src, size, true);
13292+
13293+#ifdef CONFIG_PAX_MEMORY_UDEREF
13294+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13295+ dst += PAX_USER_SHADOW_BASE;
13296+#endif
13297+
13298+ return copy_user_generic((__force_kernel void *)dst, src, size);
13299+ }
13300 switch (size) {
13301- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13302+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13303 ret, "b", "b", "iq", 1);
13304 return ret;
13305- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13306+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13307 ret, "w", "w", "ir", 2);
13308 return ret;
13309- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13310+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13311 ret, "l", "k", "ir", 4);
13312 return ret;
13313- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13314+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13315 ret, "q", "", "er", 8);
13316 return ret;
13317 case 10:
13318- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13319+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13320 ret, "q", "", "er", 10);
13321 if (unlikely(ret))
13322 return ret;
13323 asm("":::"memory");
13324- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13325+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13326 ret, "w", "w", "ir", 2);
13327 return ret;
13328 case 16:
13329- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13330+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13331 ret, "q", "", "er", 16);
13332 if (unlikely(ret))
13333 return ret;
13334 asm("":::"memory");
13335- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13336+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13337 ret, "q", "", "er", 8);
13338 return ret;
13339 default:
13340- return copy_user_generic((__force void *)dst, src, size);
13341+
13342+#ifdef CONFIG_PAX_MEMORY_UDEREF
13343+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13344+ dst += PAX_USER_SHADOW_BASE;
13345+#endif
13346+
13347+ return copy_user_generic((__force_kernel void *)dst, src, size);
13348 }
13349 }
13350
13351 static __always_inline __must_check
13352-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13353+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13354 {
13355- int ret = 0;
13356+ unsigned ret = 0;
13357
13358 might_fault();
13359- if (!__builtin_constant_p(size))
13360- return copy_user_generic((__force void *)dst,
13361- (__force void *)src, size);
13362+
13363+ if (size > INT_MAX)
13364+ return size;
13365+
13366+#ifdef CONFIG_PAX_MEMORY_UDEREF
13367+ if (!__access_ok(VERIFY_READ, src, size))
13368+ return size;
13369+ if (!__access_ok(VERIFY_WRITE, dst, size))
13370+ return size;
13371+#endif
13372+
13373+ if (!__builtin_constant_p(size)) {
13374+
13375+#ifdef CONFIG_PAX_MEMORY_UDEREF
13376+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13377+ src += PAX_USER_SHADOW_BASE;
13378+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13379+ dst += PAX_USER_SHADOW_BASE;
13380+#endif
13381+
13382+ return copy_user_generic((__force_kernel void *)dst,
13383+ (__force_kernel const void *)src, size);
13384+ }
13385 switch (size) {
13386 case 1: {
13387 u8 tmp;
13388- __get_user_asm(tmp, (u8 __user *)src,
13389+ __get_user_asm(tmp, (const u8 __user *)src,
13390 ret, "b", "b", "=q", 1);
13391 if (likely(!ret))
13392 __put_user_asm(tmp, (u8 __user *)dst,
13393@@ -176,7 +275,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13394 }
13395 case 2: {
13396 u16 tmp;
13397- __get_user_asm(tmp, (u16 __user *)src,
13398+ __get_user_asm(tmp, (const u16 __user *)src,
13399 ret, "w", "w", "=r", 2);
13400 if (likely(!ret))
13401 __put_user_asm(tmp, (u16 __user *)dst,
13402@@ -186,7 +285,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13403
13404 case 4: {
13405 u32 tmp;
13406- __get_user_asm(tmp, (u32 __user *)src,
13407+ __get_user_asm(tmp, (const u32 __user *)src,
13408 ret, "l", "k", "=r", 4);
13409 if (likely(!ret))
13410 __put_user_asm(tmp, (u32 __user *)dst,
13411@@ -195,7 +294,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13412 }
13413 case 8: {
13414 u64 tmp;
13415- __get_user_asm(tmp, (u64 __user *)src,
13416+ __get_user_asm(tmp, (const u64 __user *)src,
13417 ret, "q", "", "=r", 8);
13418 if (likely(!ret))
13419 __put_user_asm(tmp, (u64 __user *)dst,
13420@@ -203,44 +302,89 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13421 return ret;
13422 }
13423 default:
13424- return copy_user_generic((__force void *)dst,
13425- (__force void *)src, size);
13426+
13427+#ifdef CONFIG_PAX_MEMORY_UDEREF
13428+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13429+ src += PAX_USER_SHADOW_BASE;
13430+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13431+ dst += PAX_USER_SHADOW_BASE;
13432+#endif
13433+
13434+ return copy_user_generic((__force_kernel void *)dst,
13435+ (__force_kernel const void *)src, size);
13436 }
13437 }
13438
13439-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13440-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13441+__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13442+__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13443
13444 static __must_check __always_inline int
13445-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13446+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13447 {
13448- return copy_user_generic(dst, (__force const void *)src, size);
13449+ if (size > INT_MAX)
13450+ return size;
13451+
13452+#ifdef CONFIG_PAX_MEMORY_UDEREF
13453+ if (!__access_ok(VERIFY_READ, src, size))
13454+ return size;
13455+
13456+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13457+ src += PAX_USER_SHADOW_BASE;
13458+#endif
13459+
13460+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13461 }
13462
13463-static __must_check __always_inline int
13464-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13465+static __must_check __always_inline unsigned long
13466+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13467 {
13468- return copy_user_generic((__force void *)dst, src, size);
13469+ if (size > INT_MAX)
13470+ return size;
13471+
13472+#ifdef CONFIG_PAX_MEMORY_UDEREF
13473+ if (!__access_ok(VERIFY_WRITE, dst, size))
13474+ return size;
13475+
13476+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13477+ dst += PAX_USER_SHADOW_BASE;
13478+#endif
13479+
13480+ return copy_user_generic((__force_kernel void *)dst, src, size);
13481 }
13482
13483-extern long __copy_user_nocache(void *dst, const void __user *src,
13484- unsigned size, int zerorest);
13485+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13486+ unsigned long size, int zerorest) __size_overflow(3);
13487
13488-static inline int
13489-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13490+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13491 {
13492 might_sleep();
13493+
13494+ if (size > INT_MAX)
13495+ return size;
13496+
13497+#ifdef CONFIG_PAX_MEMORY_UDEREF
13498+ if (!__access_ok(VERIFY_READ, src, size))
13499+ return size;
13500+#endif
13501+
13502 return __copy_user_nocache(dst, src, size, 1);
13503 }
13504
13505-static inline int
13506-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13507- unsigned size)
13508+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13509+ unsigned long size)
13510 {
13511+ if (size > INT_MAX)
13512+ return size;
13513+
13514+#ifdef CONFIG_PAX_MEMORY_UDEREF
13515+ if (!__access_ok(VERIFY_READ, src, size))
13516+ return size;
13517+#endif
13518+
13519 return __copy_user_nocache(dst, src, size, 0);
13520 }
13521
13522-unsigned long
13523-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13524+extern unsigned long
13525+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13526
13527 #endif /* _ASM_X86_UACCESS_64_H */
13528diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13529index bb05228..d763d5b 100644
13530--- a/arch/x86/include/asm/vdso.h
13531+++ b/arch/x86/include/asm/vdso.h
13532@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13533 #define VDSO32_SYMBOL(base, name) \
13534 ({ \
13535 extern const char VDSO32_##name[]; \
13536- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13537+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13538 })
13539 #endif
13540
13541diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
13542index 5b238981..77fdd78 100644
13543--- a/arch/x86/include/asm/word-at-a-time.h
13544+++ b/arch/x86/include/asm/word-at-a-time.h
13545@@ -11,7 +11,7 @@
13546 * and shift, for example.
13547 */
13548 struct word_at_a_time {
13549- const unsigned long one_bits, high_bits;
13550+ unsigned long one_bits, high_bits;
13551 };
13552
13553 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
13554diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13555index c090af1..7e7bf16 100644
13556--- a/arch/x86/include/asm/x86_init.h
13557+++ b/arch/x86/include/asm/x86_init.h
13558@@ -29,7 +29,7 @@ struct x86_init_mpparse {
13559 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13560 void (*find_smp_config)(void);
13561 void (*get_smp_config)(unsigned int early);
13562-};
13563+} __no_const;
13564
13565 /**
13566 * struct x86_init_resources - platform specific resource related ops
13567@@ -43,7 +43,7 @@ struct x86_init_resources {
13568 void (*probe_roms)(void);
13569 void (*reserve_resources)(void);
13570 char *(*memory_setup)(void);
13571-};
13572+} __no_const;
13573
13574 /**
13575 * struct x86_init_irqs - platform specific interrupt setup
13576@@ -56,7 +56,7 @@ struct x86_init_irqs {
13577 void (*pre_vector_init)(void);
13578 void (*intr_init)(void);
13579 void (*trap_init)(void);
13580-};
13581+} __no_const;
13582
13583 /**
13584 * struct x86_init_oem - oem platform specific customizing functions
13585@@ -66,7 +66,7 @@ struct x86_init_irqs {
13586 struct x86_init_oem {
13587 void (*arch_setup)(void);
13588 void (*banner)(void);
13589-};
13590+} __no_const;
13591
13592 /**
13593 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13594@@ -77,7 +77,7 @@ struct x86_init_oem {
13595 */
13596 struct x86_init_mapping {
13597 void (*pagetable_reserve)(u64 start, u64 end);
13598-};
13599+} __no_const;
13600
13601 /**
13602 * struct x86_init_paging - platform specific paging functions
13603@@ -87,7 +87,7 @@ struct x86_init_mapping {
13604 struct x86_init_paging {
13605 void (*pagetable_setup_start)(pgd_t *base);
13606 void (*pagetable_setup_done)(pgd_t *base);
13607-};
13608+} __no_const;
13609
13610 /**
13611 * struct x86_init_timers - platform specific timer setup
13612@@ -102,7 +102,7 @@ struct x86_init_timers {
13613 void (*tsc_pre_init)(void);
13614 void (*timer_init)(void);
13615 void (*wallclock_init)(void);
13616-};
13617+} __no_const;
13618
13619 /**
13620 * struct x86_init_iommu - platform specific iommu setup
13621@@ -110,7 +110,7 @@ struct x86_init_timers {
13622 */
13623 struct x86_init_iommu {
13624 int (*iommu_init)(void);
13625-};
13626+} __no_const;
13627
13628 /**
13629 * struct x86_init_pci - platform specific pci init functions
13630@@ -124,7 +124,7 @@ struct x86_init_pci {
13631 int (*init)(void);
13632 void (*init_irq)(void);
13633 void (*fixup_irqs)(void);
13634-};
13635+} __no_const;
13636
13637 /**
13638 * struct x86_init_ops - functions for platform specific setup
13639@@ -140,7 +140,7 @@ struct x86_init_ops {
13640 struct x86_init_timers timers;
13641 struct x86_init_iommu iommu;
13642 struct x86_init_pci pci;
13643-};
13644+} __no_const;
13645
13646 /**
13647 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13648@@ -151,7 +151,7 @@ struct x86_cpuinit_ops {
13649 void (*setup_percpu_clockev)(void);
13650 void (*early_percpu_clock_init)(void);
13651 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13652-};
13653+} __no_const;
13654
13655 /**
13656 * struct x86_platform_ops - platform specific runtime functions
13657@@ -177,7 +177,7 @@ struct x86_platform_ops {
13658 int (*i8042_detect)(void);
13659 void (*save_sched_clock_state)(void);
13660 void (*restore_sched_clock_state)(void);
13661-};
13662+} __no_const;
13663
13664 struct pci_dev;
13665
13666@@ -186,14 +186,14 @@ struct x86_msi_ops {
13667 void (*teardown_msi_irq)(unsigned int irq);
13668 void (*teardown_msi_irqs)(struct pci_dev *dev);
13669 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13670-};
13671+} __no_const;
13672
13673 struct x86_io_apic_ops {
13674 void (*init) (void);
13675 unsigned int (*read) (unsigned int apic, unsigned int reg);
13676 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
13677 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
13678-};
13679+} __no_const;
13680
13681 extern struct x86_init_ops x86_init;
13682 extern struct x86_cpuinit_ops x86_cpuinit;
13683diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13684index 8a1b6f9..a29c4e4 100644
13685--- a/arch/x86/include/asm/xsave.h
13686+++ b/arch/x86/include/asm/xsave.h
13687@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13688 {
13689 int err;
13690
13691+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13692+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13693+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13694+#endif
13695+
13696 /*
13697 * Clear the xsave header first, so that reserved fields are
13698 * initialized to zero.
13699@@ -93,10 +98,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13700 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13701 {
13702 int err;
13703- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13704+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13705 u32 lmask = mask;
13706 u32 hmask = mask >> 32;
13707
13708+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13709+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13710+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13711+#endif
13712+
13713 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13714 "2:\n"
13715 ".section .fixup,\"ax\"\n"
13716diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13717index 95bf99de..de9235c 100644
13718--- a/arch/x86/kernel/acpi/sleep.c
13719+++ b/arch/x86/kernel/acpi/sleep.c
13720@@ -73,8 +73,12 @@ int acpi_suspend_lowlevel(void)
13721 #else /* CONFIG_64BIT */
13722 #ifdef CONFIG_SMP
13723 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13724+
13725+ pax_open_kernel();
13726 early_gdt_descr.address =
13727 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13728+ pax_close_kernel();
13729+
13730 initial_gs = per_cpu_offset(smp_processor_id());
13731 #endif
13732 initial_code = (unsigned long)wakeup_long64;
13733diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13734index 7261083..5c12053 100644
13735--- a/arch/x86/kernel/acpi/wakeup_32.S
13736+++ b/arch/x86/kernel/acpi/wakeup_32.S
13737@@ -30,13 +30,11 @@ wakeup_pmode_return:
13738 # and restore the stack ... but you need gdt for this to work
13739 movl saved_context_esp, %esp
13740
13741- movl %cs:saved_magic, %eax
13742- cmpl $0x12345678, %eax
13743+ cmpl $0x12345678, saved_magic
13744 jne bogus_magic
13745
13746 # jump to place where we left off
13747- movl saved_eip, %eax
13748- jmp *%eax
13749+ jmp *(saved_eip)
13750
13751 bogus_magic:
13752 jmp bogus_magic
13753diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13754index 73ef56c..0238021 100644
13755--- a/arch/x86/kernel/alternative.c
13756+++ b/arch/x86/kernel/alternative.c
13757@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13758 */
13759 for (a = start; a < end; a++) {
13760 instr = (u8 *)&a->instr_offset + a->instr_offset;
13761+
13762+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13763+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13764+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13765+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13766+#endif
13767+
13768 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13769 BUG_ON(a->replacementlen > a->instrlen);
13770 BUG_ON(a->instrlen > sizeof(insnbuf));
13771@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13772 for (poff = start; poff < end; poff++) {
13773 u8 *ptr = (u8 *)poff + *poff;
13774
13775+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13776+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13777+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13778+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13779+#endif
13780+
13781 if (!*poff || ptr < text || ptr >= text_end)
13782 continue;
13783 /* turn DS segment override prefix into lock prefix */
13784- if (*ptr == 0x3e)
13785+ if (*ktla_ktva(ptr) == 0x3e)
13786 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13787 };
13788 mutex_unlock(&text_mutex);
13789@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13790 for (poff = start; poff < end; poff++) {
13791 u8 *ptr = (u8 *)poff + *poff;
13792
13793+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13794+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13795+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13796+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13797+#endif
13798+
13799 if (!*poff || ptr < text || ptr >= text_end)
13800 continue;
13801 /* turn lock prefix into DS segment override prefix */
13802- if (*ptr == 0xf0)
13803+ if (*ktla_ktva(ptr) == 0xf0)
13804 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13805 };
13806 mutex_unlock(&text_mutex);
13807@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13808
13809 BUG_ON(p->len > MAX_PATCH_LEN);
13810 /* prep the buffer with the original instructions */
13811- memcpy(insnbuf, p->instr, p->len);
13812+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13813 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13814 (unsigned long)p->instr, p->len);
13815
13816@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13817 if (smp_alt_once)
13818 free_init_pages("SMP alternatives",
13819 (unsigned long)__smp_locks,
13820- (unsigned long)__smp_locks_end);
13821+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13822
13823 restart_nmi();
13824 }
13825@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13826 * instructions. And on the local CPU you need to be protected again NMI or MCE
13827 * handlers seeing an inconsistent instruction while you patch.
13828 */
13829-void *__init_or_module text_poke_early(void *addr, const void *opcode,
13830+void *__kprobes text_poke_early(void *addr, const void *opcode,
13831 size_t len)
13832 {
13833 unsigned long flags;
13834 local_irq_save(flags);
13835- memcpy(addr, opcode, len);
13836+
13837+ pax_open_kernel();
13838+ memcpy(ktla_ktva(addr), opcode, len);
13839 sync_core();
13840+ pax_close_kernel();
13841+
13842 local_irq_restore(flags);
13843 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13844 that causes hangs on some VIA CPUs. */
13845@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13846 */
13847 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13848 {
13849- unsigned long flags;
13850- char *vaddr;
13851+ unsigned char *vaddr = ktla_ktva(addr);
13852 struct page *pages[2];
13853- int i;
13854+ size_t i;
13855
13856 if (!core_kernel_text((unsigned long)addr)) {
13857- pages[0] = vmalloc_to_page(addr);
13858- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13859+ pages[0] = vmalloc_to_page(vaddr);
13860+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13861 } else {
13862- pages[0] = virt_to_page(addr);
13863+ pages[0] = virt_to_page(vaddr);
13864 WARN_ON(!PageReserved(pages[0]));
13865- pages[1] = virt_to_page(addr + PAGE_SIZE);
13866+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13867 }
13868 BUG_ON(!pages[0]);
13869- local_irq_save(flags);
13870- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13871- if (pages[1])
13872- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13873- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13874- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13875- clear_fixmap(FIX_TEXT_POKE0);
13876- if (pages[1])
13877- clear_fixmap(FIX_TEXT_POKE1);
13878- local_flush_tlb();
13879- sync_core();
13880- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13881- that causes hangs on some VIA CPUs. */
13882+ text_poke_early(addr, opcode, len);
13883 for (i = 0; i < len; i++)
13884- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13885- local_irq_restore(flags);
13886+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13887 return addr;
13888 }
13889
13890diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13891index 39a222e..85a7767 100644
13892--- a/arch/x86/kernel/apic/apic.c
13893+++ b/arch/x86/kernel/apic/apic.c
13894@@ -185,7 +185,7 @@ int first_system_vector = 0xfe;
13895 /*
13896 * Debug level, exported for io_apic.c
13897 */
13898-unsigned int apic_verbosity;
13899+int apic_verbosity;
13900
13901 int pic_mode;
13902
13903@@ -1923,7 +1923,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13904 apic_write(APIC_ESR, 0);
13905 v1 = apic_read(APIC_ESR);
13906 ack_APIC_irq();
13907- atomic_inc(&irq_err_count);
13908+ atomic_inc_unchecked(&irq_err_count);
13909
13910 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13911 smp_processor_id(), v0 , v1);
13912diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13913index 5f0ff59..f9e01bc 100644
13914--- a/arch/x86/kernel/apic/io_apic.c
13915+++ b/arch/x86/kernel/apic/io_apic.c
13916@@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13917 }
13918 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13919
13920-void lock_vector_lock(void)
13921+void lock_vector_lock(void) __acquires(vector_lock)
13922 {
13923 /* Used to the online set of cpus does not change
13924 * during assign_irq_vector.
13925@@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
13926 raw_spin_lock(&vector_lock);
13927 }
13928
13929-void unlock_vector_lock(void)
13930+void unlock_vector_lock(void) __releases(vector_lock)
13931 {
13932 raw_spin_unlock(&vector_lock);
13933 }
13934@@ -2369,7 +2369,7 @@ static void ack_apic_edge(struct irq_data *data)
13935 ack_APIC_irq();
13936 }
13937
13938-atomic_t irq_mis_count;
13939+atomic_unchecked_t irq_mis_count;
13940
13941 #ifdef CONFIG_GENERIC_PENDING_IRQ
13942 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
13943@@ -2510,7 +2510,7 @@ static void ack_apic_level(struct irq_data *data)
13944 * at the cpu.
13945 */
13946 if (!(v & (1 << (i & 0x1f)))) {
13947- atomic_inc(&irq_mis_count);
13948+ atomic_inc_unchecked(&irq_mis_count);
13949
13950 eoi_ioapic_irq(irq, cfg);
13951 }
13952diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
13953index 3fe9866..6abf259 100644
13954--- a/arch/x86/kernel/apic/probe_64.c
13955+++ b/arch/x86/kernel/apic/probe_64.c
13956@@ -50,7 +50,7 @@ void __init default_setup_apic_routing(void)
13957
13958 if (is_vsmp_box()) {
13959 /* need to update phys_pkg_id */
13960- apic->phys_pkg_id = apicid_phys_pkg_id;
13961+ *(void **)&apic->phys_pkg_id = apicid_phys_pkg_id;
13962 }
13963 }
13964
13965diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13966index 07b0c0d..1df6f42 100644
13967--- a/arch/x86/kernel/apm_32.c
13968+++ b/arch/x86/kernel/apm_32.c
13969@@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
13970 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13971 * even though they are called in protected mode.
13972 */
13973-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13974+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13975 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13976
13977 static const char driver_version[] = "1.16ac"; /* no spaces */
13978@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13979 BUG_ON(cpu != 0);
13980 gdt = get_cpu_gdt_table(cpu);
13981 save_desc_40 = gdt[0x40 / 8];
13982+
13983+ pax_open_kernel();
13984 gdt[0x40 / 8] = bad_bios_desc;
13985+ pax_close_kernel();
13986
13987 apm_irq_save(flags);
13988 APM_DO_SAVE_SEGS;
13989@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13990 &call->esi);
13991 APM_DO_RESTORE_SEGS;
13992 apm_irq_restore(flags);
13993+
13994+ pax_open_kernel();
13995 gdt[0x40 / 8] = save_desc_40;
13996+ pax_close_kernel();
13997+
13998 put_cpu();
13999
14000 return call->eax & 0xff;
14001@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
14002 BUG_ON(cpu != 0);
14003 gdt = get_cpu_gdt_table(cpu);
14004 save_desc_40 = gdt[0x40 / 8];
14005+
14006+ pax_open_kernel();
14007 gdt[0x40 / 8] = bad_bios_desc;
14008+ pax_close_kernel();
14009
14010 apm_irq_save(flags);
14011 APM_DO_SAVE_SEGS;
14012@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
14013 &call->eax);
14014 APM_DO_RESTORE_SEGS;
14015 apm_irq_restore(flags);
14016+
14017+ pax_open_kernel();
14018 gdt[0x40 / 8] = save_desc_40;
14019+ pax_close_kernel();
14020+
14021 put_cpu();
14022 return error;
14023 }
14024@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
14025 * code to that CPU.
14026 */
14027 gdt = get_cpu_gdt_table(0);
14028+
14029+ pax_open_kernel();
14030 set_desc_base(&gdt[APM_CS >> 3],
14031 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14032 set_desc_base(&gdt[APM_CS_16 >> 3],
14033 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14034 set_desc_base(&gdt[APM_DS >> 3],
14035 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14036+ pax_close_kernel();
14037
14038 proc_create("apm", 0, NULL, &apm_file_ops);
14039
14040diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
14041index 68de2dc..1f3c720 100644
14042--- a/arch/x86/kernel/asm-offsets.c
14043+++ b/arch/x86/kernel/asm-offsets.c
14044@@ -33,6 +33,8 @@ void common(void) {
14045 OFFSET(TI_status, thread_info, status);
14046 OFFSET(TI_addr_limit, thread_info, addr_limit);
14047 OFFSET(TI_preempt_count, thread_info, preempt_count);
14048+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14049+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14050
14051 BLANK();
14052 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
14053@@ -53,8 +55,26 @@ void common(void) {
14054 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14055 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14056 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14057+
14058+#ifdef CONFIG_PAX_KERNEXEC
14059+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14060 #endif
14061
14062+#ifdef CONFIG_PAX_MEMORY_UDEREF
14063+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14064+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14065+#ifdef CONFIG_X86_64
14066+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14067+#endif
14068+#endif
14069+
14070+#endif
14071+
14072+ BLANK();
14073+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14074+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14075+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14076+
14077 #ifdef CONFIG_XEN
14078 BLANK();
14079 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14080diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14081index 1b4754f..fbb4227 100644
14082--- a/arch/x86/kernel/asm-offsets_64.c
14083+++ b/arch/x86/kernel/asm-offsets_64.c
14084@@ -76,6 +76,7 @@ int main(void)
14085 BLANK();
14086 #undef ENTRY
14087
14088+ DEFINE(TSS_size, sizeof(struct tss_struct));
14089 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
14090 BLANK();
14091
14092diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14093index 6ab6aa2..8f71507 100644
14094--- a/arch/x86/kernel/cpu/Makefile
14095+++ b/arch/x86/kernel/cpu/Makefile
14096@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14097 CFLAGS_REMOVE_perf_event.o = -pg
14098 endif
14099
14100-# Make sure load_percpu_segment has no stackprotector
14101-nostackp := $(call cc-option, -fno-stack-protector)
14102-CFLAGS_common.o := $(nostackp)
14103-
14104 obj-y := intel_cacheinfo.o scattered.o topology.o
14105 obj-y += proc.o capflags.o powerflags.o common.o
14106 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14107diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14108index 146bb62..ac9c74a 100644
14109--- a/arch/x86/kernel/cpu/amd.c
14110+++ b/arch/x86/kernel/cpu/amd.c
14111@@ -691,7 +691,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14112 unsigned int size)
14113 {
14114 /* AMD errata T13 (order #21922) */
14115- if ((c->x86 == 6)) {
14116+ if (c->x86 == 6) {
14117 /* Duron Rev A0 */
14118 if (c->x86_model == 3 && c->x86_mask == 0)
14119 size = 64;
14120diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14121index 6b9333b..4c3083a 100644
14122--- a/arch/x86/kernel/cpu/common.c
14123+++ b/arch/x86/kernel/cpu/common.c
14124@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14125
14126 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14127
14128-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14129-#ifdef CONFIG_X86_64
14130- /*
14131- * We need valid kernel segments for data and code in long mode too
14132- * IRET will check the segment types kkeil 2000/10/28
14133- * Also sysret mandates a special GDT layout
14134- *
14135- * TLS descriptors are currently at a different place compared to i386.
14136- * Hopefully nobody expects them at a fixed place (Wine?)
14137- */
14138- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14139- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14140- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14141- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14142- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14143- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14144-#else
14145- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14146- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14147- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14148- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14149- /*
14150- * Segments used for calling PnP BIOS have byte granularity.
14151- * They code segments and data segments have fixed 64k limits,
14152- * the transfer segment sizes are set at run time.
14153- */
14154- /* 32-bit code */
14155- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14156- /* 16-bit code */
14157- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14158- /* 16-bit data */
14159- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14160- /* 16-bit data */
14161- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14162- /* 16-bit data */
14163- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14164- /*
14165- * The APM segments have byte granularity and their bases
14166- * are set at run time. All have 64k limits.
14167- */
14168- /* 32-bit code */
14169- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14170- /* 16-bit code */
14171- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14172- /* data */
14173- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14174-
14175- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14176- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14177- GDT_STACK_CANARY_INIT
14178-#endif
14179-} };
14180-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14181-
14182 static int __init x86_xsave_setup(char *s)
14183 {
14184 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14185@@ -374,7 +320,7 @@ void switch_to_new_gdt(int cpu)
14186 {
14187 struct desc_ptr gdt_descr;
14188
14189- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14190+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14191 gdt_descr.size = GDT_SIZE - 1;
14192 load_gdt(&gdt_descr);
14193 /* Reload the per-cpu base */
14194@@ -841,6 +787,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14195 /* Filter out anything that depends on CPUID levels we don't have */
14196 filter_cpuid_features(c, true);
14197
14198+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14199+ setup_clear_cpu_cap(X86_FEATURE_SEP);
14200+#endif
14201+
14202 /* If the model name is still unset, do table lookup. */
14203 if (!c->x86_model_id[0]) {
14204 const char *p;
14205@@ -1021,10 +971,12 @@ static __init int setup_disablecpuid(char *arg)
14206 }
14207 __setup("clearcpuid=", setup_disablecpuid);
14208
14209+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14210+EXPORT_PER_CPU_SYMBOL(current_tinfo);
14211+
14212 #ifdef CONFIG_X86_64
14213 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14214-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14215- (unsigned long) nmi_idt_table };
14216+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
14217
14218 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14219 irq_stack_union) __aligned(PAGE_SIZE);
14220@@ -1038,7 +990,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14221 EXPORT_PER_CPU_SYMBOL(current_task);
14222
14223 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14224- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14225+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14226 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14227
14228 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14229@@ -1132,7 +1084,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14230 {
14231 memset(regs, 0, sizeof(struct pt_regs));
14232 regs->fs = __KERNEL_PERCPU;
14233- regs->gs = __KERNEL_STACK_CANARY;
14234+ savesegment(gs, regs->gs);
14235
14236 return regs;
14237 }
14238@@ -1187,7 +1139,7 @@ void __cpuinit cpu_init(void)
14239 int i;
14240
14241 cpu = stack_smp_processor_id();
14242- t = &per_cpu(init_tss, cpu);
14243+ t = init_tss + cpu;
14244 oist = &per_cpu(orig_ist, cpu);
14245
14246 #ifdef CONFIG_NUMA
14247@@ -1213,7 +1165,7 @@ void __cpuinit cpu_init(void)
14248 switch_to_new_gdt(cpu);
14249 loadsegment(fs, 0);
14250
14251- load_idt((const struct desc_ptr *)&idt_descr);
14252+ load_idt(&idt_descr);
14253
14254 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14255 syscall_init();
14256@@ -1222,7 +1174,6 @@ void __cpuinit cpu_init(void)
14257 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14258 barrier();
14259
14260- x86_configure_nx();
14261 if (cpu != 0)
14262 enable_x2apic();
14263
14264@@ -1278,7 +1229,7 @@ void __cpuinit cpu_init(void)
14265 {
14266 int cpu = smp_processor_id();
14267 struct task_struct *curr = current;
14268- struct tss_struct *t = &per_cpu(init_tss, cpu);
14269+ struct tss_struct *t = init_tss + cpu;
14270 struct thread_struct *thread = &curr->thread;
14271
14272 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14273diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14274index 3e6ff6c..54b4992 100644
14275--- a/arch/x86/kernel/cpu/intel.c
14276+++ b/arch/x86/kernel/cpu/intel.c
14277@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14278 * Update the IDT descriptor and reload the IDT so that
14279 * it uses the read-only mapped virtual address.
14280 */
14281- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14282+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14283 load_idt(&idt_descr);
14284 }
14285 #endif
14286diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14287index c46ed49..5dc0a53 100644
14288--- a/arch/x86/kernel/cpu/mcheck/mce.c
14289+++ b/arch/x86/kernel/cpu/mcheck/mce.c
14290@@ -42,6 +42,7 @@
14291 #include <asm/processor.h>
14292 #include <asm/mce.h>
14293 #include <asm/msr.h>
14294+#include <asm/local.h>
14295
14296 #include "mce-internal.h"
14297
14298@@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14299 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14300 m->cs, m->ip);
14301
14302- if (m->cs == __KERNEL_CS)
14303+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14304 print_symbol("{%s}", m->ip);
14305 pr_cont("\n");
14306 }
14307@@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14308
14309 #define PANIC_TIMEOUT 5 /* 5 seconds */
14310
14311-static atomic_t mce_paniced;
14312+static atomic_unchecked_t mce_paniced;
14313
14314 static int fake_panic;
14315-static atomic_t mce_fake_paniced;
14316+static atomic_unchecked_t mce_fake_paniced;
14317
14318 /* Panic in progress. Enable interrupts and wait for final IPI */
14319 static void wait_for_panic(void)
14320@@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14321 /*
14322 * Make sure only one CPU runs in machine check panic
14323 */
14324- if (atomic_inc_return(&mce_paniced) > 1)
14325+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14326 wait_for_panic();
14327 barrier();
14328
14329@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14330 console_verbose();
14331 } else {
14332 /* Don't log too much for fake panic */
14333- if (atomic_inc_return(&mce_fake_paniced) > 1)
14334+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14335 return;
14336 }
14337 /* First print corrected ones that are still unlogged */
14338@@ -686,7 +687,7 @@ static int mce_timed_out(u64 *t)
14339 * might have been modified by someone else.
14340 */
14341 rmb();
14342- if (atomic_read(&mce_paniced))
14343+ if (atomic_read_unchecked(&mce_paniced))
14344 wait_for_panic();
14345 if (!monarch_timeout)
14346 goto out;
14347@@ -1581,7 +1582,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14348 }
14349
14350 /* Call the installed machine check handler for this CPU setup. */
14351-void (*machine_check_vector)(struct pt_regs *, long error_code) =
14352+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14353 unexpected_machine_check;
14354
14355 /*
14356@@ -1604,7 +1605,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14357 return;
14358 }
14359
14360+ pax_open_kernel();
14361 machine_check_vector = do_machine_check;
14362+ pax_close_kernel();
14363
14364 __mcheck_cpu_init_generic();
14365 __mcheck_cpu_init_vendor(c);
14366@@ -1618,7 +1621,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14367 */
14368
14369 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14370-static int mce_chrdev_open_count; /* #times opened */
14371+static local_t mce_chrdev_open_count; /* #times opened */
14372 static int mce_chrdev_open_exclu; /* already open exclusive? */
14373
14374 static int mce_chrdev_open(struct inode *inode, struct file *file)
14375@@ -1626,7 +1629,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14376 spin_lock(&mce_chrdev_state_lock);
14377
14378 if (mce_chrdev_open_exclu ||
14379- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14380+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14381 spin_unlock(&mce_chrdev_state_lock);
14382
14383 return -EBUSY;
14384@@ -1634,7 +1637,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14385
14386 if (file->f_flags & O_EXCL)
14387 mce_chrdev_open_exclu = 1;
14388- mce_chrdev_open_count++;
14389+ local_inc(&mce_chrdev_open_count);
14390
14391 spin_unlock(&mce_chrdev_state_lock);
14392
14393@@ -1645,7 +1648,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
14394 {
14395 spin_lock(&mce_chrdev_state_lock);
14396
14397- mce_chrdev_open_count--;
14398+ local_dec(&mce_chrdev_open_count);
14399 mce_chrdev_open_exclu = 0;
14400
14401 spin_unlock(&mce_chrdev_state_lock);
14402@@ -2370,7 +2373,7 @@ struct dentry *mce_get_debugfs_dir(void)
14403 static void mce_reset(void)
14404 {
14405 cpu_missing = 0;
14406- atomic_set(&mce_fake_paniced, 0);
14407+ atomic_set_unchecked(&mce_fake_paniced, 0);
14408 atomic_set(&mce_executing, 0);
14409 atomic_set(&mce_callin, 0);
14410 atomic_set(&global_nwo, 0);
14411diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14412index 2d5454c..51987eb 100644
14413--- a/arch/x86/kernel/cpu/mcheck/p5.c
14414+++ b/arch/x86/kernel/cpu/mcheck/p5.c
14415@@ -11,6 +11,7 @@
14416 #include <asm/processor.h>
14417 #include <asm/mce.h>
14418 #include <asm/msr.h>
14419+#include <asm/pgtable.h>
14420
14421 /* By default disabled */
14422 int mce_p5_enabled __read_mostly;
14423@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14424 if (!cpu_has(c, X86_FEATURE_MCE))
14425 return;
14426
14427+ pax_open_kernel();
14428 machine_check_vector = pentium_machine_check;
14429+ pax_close_kernel();
14430 /* Make sure the vector pointer is visible before we enable MCEs: */
14431 wmb();
14432
14433diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14434index 2d7998f..17c9de1 100644
14435--- a/arch/x86/kernel/cpu/mcheck/winchip.c
14436+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14437@@ -10,6 +10,7 @@
14438 #include <asm/processor.h>
14439 #include <asm/mce.h>
14440 #include <asm/msr.h>
14441+#include <asm/pgtable.h>
14442
14443 /* Machine check handler for WinChip C6: */
14444 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14445@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14446 {
14447 u32 lo, hi;
14448
14449+ pax_open_kernel();
14450 machine_check_vector = winchip_machine_check;
14451+ pax_close_kernel();
14452 /* Make sure the vector pointer is visible before we enable MCEs: */
14453 wmb();
14454
14455diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14456index 6b96110..0da73eb 100644
14457--- a/arch/x86/kernel/cpu/mtrr/main.c
14458+++ b/arch/x86/kernel/cpu/mtrr/main.c
14459@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14460 u64 size_or_mask, size_and_mask;
14461 static bool mtrr_aps_delayed_init;
14462
14463-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14464+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14465
14466 const struct mtrr_ops *mtrr_if;
14467
14468diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14469index df5e41f..816c719 100644
14470--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14471+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14472@@ -25,7 +25,7 @@ struct mtrr_ops {
14473 int (*validate_add_page)(unsigned long base, unsigned long size,
14474 unsigned int type);
14475 int (*have_wrcomb)(void);
14476-};
14477+} __do_const;
14478
14479 extern int generic_get_free_region(unsigned long base, unsigned long size,
14480 int replace_reg);
14481diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14482index c4706cf..264b0f7 100644
14483--- a/arch/x86/kernel/cpu/perf_event.c
14484+++ b/arch/x86/kernel/cpu/perf_event.c
14485@@ -1837,7 +1837,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14486 break;
14487
14488 perf_callchain_store(entry, frame.return_address);
14489- fp = frame.next_frame;
14490+ fp = (const void __force_user *)frame.next_frame;
14491 }
14492 }
14493
14494diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
14495index 187c294..28a069c 100644
14496--- a/arch/x86/kernel/cpu/perf_event_intel.c
14497+++ b/arch/x86/kernel/cpu/perf_event_intel.c
14498@@ -1811,10 +1811,10 @@ __init int intel_pmu_init(void)
14499 * v2 and above have a perf capabilities MSR
14500 */
14501 if (version > 1) {
14502- u64 capabilities;
14503+ u64 capabilities = x86_pmu.intel_cap.capabilities;
14504
14505- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
14506- x86_pmu.intel_cap.capabilities = capabilities;
14507+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
14508+ x86_pmu.intel_cap.capabilities = capabilities;
14509 }
14510
14511 intel_ds_init();
14512diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14513index 13ad899..f642b9a 100644
14514--- a/arch/x86/kernel/crash.c
14515+++ b/arch/x86/kernel/crash.c
14516@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14517 {
14518 #ifdef CONFIG_X86_32
14519 struct pt_regs fixed_regs;
14520-#endif
14521
14522-#ifdef CONFIG_X86_32
14523- if (!user_mode_vm(regs)) {
14524+ if (!user_mode(regs)) {
14525 crash_fixup_ss_esp(&fixed_regs, regs);
14526 regs = &fixed_regs;
14527 }
14528diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14529index 37250fe..bf2ec74 100644
14530--- a/arch/x86/kernel/doublefault_32.c
14531+++ b/arch/x86/kernel/doublefault_32.c
14532@@ -11,7 +11,7 @@
14533
14534 #define DOUBLEFAULT_STACKSIZE (1024)
14535 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14536-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14537+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14538
14539 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14540
14541@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14542 unsigned long gdt, tss;
14543
14544 store_gdt(&gdt_desc);
14545- gdt = gdt_desc.address;
14546+ gdt = (unsigned long)gdt_desc.address;
14547
14548 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14549
14550@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14551 /* 0x2 bit is always set */
14552 .flags = X86_EFLAGS_SF | 0x2,
14553 .sp = STACK_START,
14554- .es = __USER_DS,
14555+ .es = __KERNEL_DS,
14556 .cs = __KERNEL_CS,
14557 .ss = __KERNEL_DS,
14558- .ds = __USER_DS,
14559+ .ds = __KERNEL_DS,
14560 .fs = __KERNEL_PERCPU,
14561
14562 .__cr3 = __pa_nodebug(swapper_pg_dir),
14563diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14564index 571246d..81f335c 100644
14565--- a/arch/x86/kernel/dumpstack.c
14566+++ b/arch/x86/kernel/dumpstack.c
14567@@ -2,6 +2,9 @@
14568 * Copyright (C) 1991, 1992 Linus Torvalds
14569 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14570 */
14571+#ifdef CONFIG_GRKERNSEC_HIDESYM
14572+#define __INCLUDED_BY_HIDESYM 1
14573+#endif
14574 #include <linux/kallsyms.h>
14575 #include <linux/kprobes.h>
14576 #include <linux/uaccess.h>
14577@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
14578 static void
14579 print_ftrace_graph_addr(unsigned long addr, void *data,
14580 const struct stacktrace_ops *ops,
14581- struct thread_info *tinfo, int *graph)
14582+ struct task_struct *task, int *graph)
14583 {
14584- struct task_struct *task;
14585 unsigned long ret_addr;
14586 int index;
14587
14588 if (addr != (unsigned long)return_to_handler)
14589 return;
14590
14591- task = tinfo->task;
14592 index = task->curr_ret_stack;
14593
14594 if (!task->ret_stack || index < *graph)
14595@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14596 static inline void
14597 print_ftrace_graph_addr(unsigned long addr, void *data,
14598 const struct stacktrace_ops *ops,
14599- struct thread_info *tinfo, int *graph)
14600+ struct task_struct *task, int *graph)
14601 { }
14602 #endif
14603
14604@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14605 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14606 */
14607
14608-static inline int valid_stack_ptr(struct thread_info *tinfo,
14609- void *p, unsigned int size, void *end)
14610+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14611 {
14612- void *t = tinfo;
14613 if (end) {
14614 if (p < end && p >= (end-THREAD_SIZE))
14615 return 1;
14616@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14617 }
14618
14619 unsigned long
14620-print_context_stack(struct thread_info *tinfo,
14621+print_context_stack(struct task_struct *task, void *stack_start,
14622 unsigned long *stack, unsigned long bp,
14623 const struct stacktrace_ops *ops, void *data,
14624 unsigned long *end, int *graph)
14625 {
14626 struct stack_frame *frame = (struct stack_frame *)bp;
14627
14628- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14629+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14630 unsigned long addr;
14631
14632 addr = *stack;
14633@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
14634 } else {
14635 ops->address(data, addr, 0);
14636 }
14637- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14638+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14639 }
14640 stack++;
14641 }
14642@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
14643 EXPORT_SYMBOL_GPL(print_context_stack);
14644
14645 unsigned long
14646-print_context_stack_bp(struct thread_info *tinfo,
14647+print_context_stack_bp(struct task_struct *task, void *stack_start,
14648 unsigned long *stack, unsigned long bp,
14649 const struct stacktrace_ops *ops, void *data,
14650 unsigned long *end, int *graph)
14651@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14652 struct stack_frame *frame = (struct stack_frame *)bp;
14653 unsigned long *ret_addr = &frame->return_address;
14654
14655- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14656+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14657 unsigned long addr = *ret_addr;
14658
14659 if (!__kernel_text_address(addr))
14660@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14661 ops->address(data, addr, 1);
14662 frame = frame->next_frame;
14663 ret_addr = &frame->return_address;
14664- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14665+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14666 }
14667
14668 return (unsigned long)frame;
14669@@ -189,7 +188,7 @@ void dump_stack(void)
14670
14671 bp = stack_frame(current, NULL);
14672 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14673- current->pid, current->comm, print_tainted(),
14674+ task_pid_nr(current), current->comm, print_tainted(),
14675 init_utsname()->release,
14676 (int)strcspn(init_utsname()->version, " "),
14677 init_utsname()->version);
14678@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
14679 }
14680 EXPORT_SYMBOL_GPL(oops_begin);
14681
14682+extern void gr_handle_kernel_exploit(void);
14683+
14684 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14685 {
14686 if (regs && kexec_should_crash(current))
14687@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14688 panic("Fatal exception in interrupt");
14689 if (panic_on_oops)
14690 panic("Fatal exception");
14691- do_exit(signr);
14692+
14693+ gr_handle_kernel_exploit();
14694+
14695+ do_group_exit(signr);
14696 }
14697
14698 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14699@@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14700
14701 show_regs(regs);
14702 #ifdef CONFIG_X86_32
14703- if (user_mode_vm(regs)) {
14704+ if (user_mode(regs)) {
14705 sp = regs->sp;
14706 ss = regs->ss & 0xffff;
14707 } else {
14708@@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14709 unsigned long flags = oops_begin();
14710 int sig = SIGSEGV;
14711
14712- if (!user_mode_vm(regs))
14713+ if (!user_mode(regs))
14714 report_bug(regs->ip, regs);
14715
14716 if (__die(str, regs, err))
14717diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14718index e0b1d78..a8ade5e 100644
14719--- a/arch/x86/kernel/dumpstack_32.c
14720+++ b/arch/x86/kernel/dumpstack_32.c
14721@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14722 bp = stack_frame(task, regs);
14723
14724 for (;;) {
14725- struct thread_info *context;
14726+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14727
14728- context = (struct thread_info *)
14729- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14730- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14731+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14732
14733- stack = (unsigned long *)context->previous_esp;
14734- if (!stack)
14735+ if (stack_start == task_stack_page(task))
14736 break;
14737+ stack = *(unsigned long **)stack_start;
14738 if (ops->stack(data, "IRQ") < 0)
14739 break;
14740 touch_nmi_watchdog();
14741@@ -87,7 +85,7 @@ void show_regs(struct pt_regs *regs)
14742 int i;
14743
14744 print_modules();
14745- __show_regs(regs, !user_mode_vm(regs));
14746+ __show_regs(regs, !user_mode(regs));
14747
14748 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
14749 TASK_COMM_LEN, current->comm, task_pid_nr(current),
14750@@ -96,21 +94,22 @@ void show_regs(struct pt_regs *regs)
14751 * When in-kernel, we also print out the stack and code at the
14752 * time of the fault..
14753 */
14754- if (!user_mode_vm(regs)) {
14755+ if (!user_mode(regs)) {
14756 unsigned int code_prologue = code_bytes * 43 / 64;
14757 unsigned int code_len = code_bytes;
14758 unsigned char c;
14759 u8 *ip;
14760+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14761
14762 printk(KERN_EMERG "Stack:\n");
14763 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14764
14765 printk(KERN_EMERG "Code: ");
14766
14767- ip = (u8 *)regs->ip - code_prologue;
14768+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14769 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14770 /* try starting at IP */
14771- ip = (u8 *)regs->ip;
14772+ ip = (u8 *)regs->ip + cs_base;
14773 code_len = code_len - code_prologue + 1;
14774 }
14775 for (i = 0; i < code_len; i++, ip++) {
14776@@ -119,7 +118,7 @@ void show_regs(struct pt_regs *regs)
14777 printk(KERN_CONT " Bad EIP value.");
14778 break;
14779 }
14780- if (ip == (u8 *)regs->ip)
14781+ if (ip == (u8 *)regs->ip + cs_base)
14782 printk(KERN_CONT "<%02x> ", c);
14783 else
14784 printk(KERN_CONT "%02x ", c);
14785@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14786 {
14787 unsigned short ud2;
14788
14789+ ip = ktla_ktva(ip);
14790 if (ip < PAGE_OFFSET)
14791 return 0;
14792 if (probe_kernel_address((unsigned short *)ip, ud2))
14793@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14794
14795 return ud2 == 0x0b0f;
14796 }
14797+
14798+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14799+void pax_check_alloca(unsigned long size)
14800+{
14801+ unsigned long sp = (unsigned long)&sp, stack_left;
14802+
14803+ /* all kernel stacks are of the same size */
14804+ stack_left = sp & (THREAD_SIZE - 1);
14805+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14806+}
14807+EXPORT_SYMBOL(pax_check_alloca);
14808+#endif
14809diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14810index 791b761..2ab6e33 100644
14811--- a/arch/x86/kernel/dumpstack_64.c
14812+++ b/arch/x86/kernel/dumpstack_64.c
14813@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14814 unsigned long *irq_stack_end =
14815 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14816 unsigned used = 0;
14817- struct thread_info *tinfo;
14818 int graph = 0;
14819 unsigned long dummy;
14820+ void *stack_start;
14821
14822 if (!task)
14823 task = current;
14824@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14825 * current stack address. If the stacks consist of nested
14826 * exceptions
14827 */
14828- tinfo = task_thread_info(task);
14829 for (;;) {
14830 char *id;
14831 unsigned long *estack_end;
14832+
14833 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14834 &used, &id);
14835
14836@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14837 if (ops->stack(data, id) < 0)
14838 break;
14839
14840- bp = ops->walk_stack(tinfo, stack, bp, ops,
14841+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14842 data, estack_end, &graph);
14843 ops->stack(data, "<EOE>");
14844 /*
14845@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14846 * second-to-last pointer (index -2 to end) in the
14847 * exception stack:
14848 */
14849+ if ((u16)estack_end[-1] != __KERNEL_DS)
14850+ goto out;
14851 stack = (unsigned long *) estack_end[-2];
14852 continue;
14853 }
14854@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14855 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14856 if (ops->stack(data, "IRQ") < 0)
14857 break;
14858- bp = ops->walk_stack(tinfo, stack, bp,
14859+ bp = ops->walk_stack(task, irq_stack, stack, bp,
14860 ops, data, irq_stack_end, &graph);
14861 /*
14862 * We link to the next stack (which would be
14863@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14864 /*
14865 * This handles the process stack:
14866 */
14867- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14868+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14869+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14870+out:
14871 put_cpu();
14872 }
14873 EXPORT_SYMBOL(dump_trace);
14874@@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
14875
14876 return ud2 == 0x0b0f;
14877 }
14878+
14879+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14880+void pax_check_alloca(unsigned long size)
14881+{
14882+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14883+ unsigned cpu, used;
14884+ char *id;
14885+
14886+ /* check the process stack first */
14887+ stack_start = (unsigned long)task_stack_page(current);
14888+ stack_end = stack_start + THREAD_SIZE;
14889+ if (likely(stack_start <= sp && sp < stack_end)) {
14890+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14891+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14892+ return;
14893+ }
14894+
14895+ cpu = get_cpu();
14896+
14897+ /* check the irq stacks */
14898+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14899+ stack_start = stack_end - IRQ_STACK_SIZE;
14900+ if (stack_start <= sp && sp < stack_end) {
14901+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14902+ put_cpu();
14903+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14904+ return;
14905+ }
14906+
14907+ /* check the exception stacks */
14908+ used = 0;
14909+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14910+ stack_start = stack_end - EXCEPTION_STKSZ;
14911+ if (stack_end && stack_start <= sp && sp < stack_end) {
14912+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14913+ put_cpu();
14914+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14915+ return;
14916+ }
14917+
14918+ put_cpu();
14919+
14920+ /* unknown stack */
14921+ BUG();
14922+}
14923+EXPORT_SYMBOL(pax_check_alloca);
14924+#endif
14925diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14926index 9b9f18b..9fcaa04 100644
14927--- a/arch/x86/kernel/early_printk.c
14928+++ b/arch/x86/kernel/early_printk.c
14929@@ -7,6 +7,7 @@
14930 #include <linux/pci_regs.h>
14931 #include <linux/pci_ids.h>
14932 #include <linux/errno.h>
14933+#include <linux/sched.h>
14934 #include <asm/io.h>
14935 #include <asm/processor.h>
14936 #include <asm/fcntl.h>
14937diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14938index 623f288..8bdd78a 100644
14939--- a/arch/x86/kernel/entry_32.S
14940+++ b/arch/x86/kernel/entry_32.S
14941@@ -176,13 +176,153 @@
14942 /*CFI_REL_OFFSET gs, PT_GS*/
14943 .endm
14944 .macro SET_KERNEL_GS reg
14945+
14946+#ifdef CONFIG_CC_STACKPROTECTOR
14947 movl $(__KERNEL_STACK_CANARY), \reg
14948+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14949+ movl $(__USER_DS), \reg
14950+#else
14951+ xorl \reg, \reg
14952+#endif
14953+
14954 movl \reg, %gs
14955 .endm
14956
14957 #endif /* CONFIG_X86_32_LAZY_GS */
14958
14959-.macro SAVE_ALL
14960+.macro pax_enter_kernel
14961+#ifdef CONFIG_PAX_KERNEXEC
14962+ call pax_enter_kernel
14963+#endif
14964+.endm
14965+
14966+.macro pax_exit_kernel
14967+#ifdef CONFIG_PAX_KERNEXEC
14968+ call pax_exit_kernel
14969+#endif
14970+.endm
14971+
14972+#ifdef CONFIG_PAX_KERNEXEC
14973+ENTRY(pax_enter_kernel)
14974+#ifdef CONFIG_PARAVIRT
14975+ pushl %eax
14976+ pushl %ecx
14977+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14978+ mov %eax, %esi
14979+#else
14980+ mov %cr0, %esi
14981+#endif
14982+ bts $16, %esi
14983+ jnc 1f
14984+ mov %cs, %esi
14985+ cmp $__KERNEL_CS, %esi
14986+ jz 3f
14987+ ljmp $__KERNEL_CS, $3f
14988+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14989+2:
14990+#ifdef CONFIG_PARAVIRT
14991+ mov %esi, %eax
14992+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14993+#else
14994+ mov %esi, %cr0
14995+#endif
14996+3:
14997+#ifdef CONFIG_PARAVIRT
14998+ popl %ecx
14999+ popl %eax
15000+#endif
15001+ ret
15002+ENDPROC(pax_enter_kernel)
15003+
15004+ENTRY(pax_exit_kernel)
15005+#ifdef CONFIG_PARAVIRT
15006+ pushl %eax
15007+ pushl %ecx
15008+#endif
15009+ mov %cs, %esi
15010+ cmp $__KERNEXEC_KERNEL_CS, %esi
15011+ jnz 2f
15012+#ifdef CONFIG_PARAVIRT
15013+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15014+ mov %eax, %esi
15015+#else
15016+ mov %cr0, %esi
15017+#endif
15018+ btr $16, %esi
15019+ ljmp $__KERNEL_CS, $1f
15020+1:
15021+#ifdef CONFIG_PARAVIRT
15022+ mov %esi, %eax
15023+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15024+#else
15025+ mov %esi, %cr0
15026+#endif
15027+2:
15028+#ifdef CONFIG_PARAVIRT
15029+ popl %ecx
15030+ popl %eax
15031+#endif
15032+ ret
15033+ENDPROC(pax_exit_kernel)
15034+#endif
15035+
15036+.macro pax_erase_kstack
15037+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15038+ call pax_erase_kstack
15039+#endif
15040+.endm
15041+
15042+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15043+/*
15044+ * ebp: thread_info
15045+ */
15046+ENTRY(pax_erase_kstack)
15047+ pushl %edi
15048+ pushl %ecx
15049+ pushl %eax
15050+
15051+ mov TI_lowest_stack(%ebp), %edi
15052+ mov $-0xBEEF, %eax
15053+ std
15054+
15055+1: mov %edi, %ecx
15056+ and $THREAD_SIZE_asm - 1, %ecx
15057+ shr $2, %ecx
15058+ repne scasl
15059+ jecxz 2f
15060+
15061+ cmp $2*16, %ecx
15062+ jc 2f
15063+
15064+ mov $2*16, %ecx
15065+ repe scasl
15066+ jecxz 2f
15067+ jne 1b
15068+
15069+2: cld
15070+ mov %esp, %ecx
15071+ sub %edi, %ecx
15072+
15073+ cmp $THREAD_SIZE_asm, %ecx
15074+ jb 3f
15075+ ud2
15076+3:
15077+
15078+ shr $2, %ecx
15079+ rep stosl
15080+
15081+ mov TI_task_thread_sp0(%ebp), %edi
15082+ sub $128, %edi
15083+ mov %edi, TI_lowest_stack(%ebp)
15084+
15085+ popl %eax
15086+ popl %ecx
15087+ popl %edi
15088+ ret
15089+ENDPROC(pax_erase_kstack)
15090+#endif
15091+
15092+.macro __SAVE_ALL _DS
15093 cld
15094 PUSH_GS
15095 pushl_cfi %fs
15096@@ -205,7 +345,7 @@
15097 CFI_REL_OFFSET ecx, 0
15098 pushl_cfi %ebx
15099 CFI_REL_OFFSET ebx, 0
15100- movl $(__USER_DS), %edx
15101+ movl $\_DS, %edx
15102 movl %edx, %ds
15103 movl %edx, %es
15104 movl $(__KERNEL_PERCPU), %edx
15105@@ -213,6 +353,15 @@
15106 SET_KERNEL_GS %edx
15107 .endm
15108
15109+.macro SAVE_ALL
15110+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15111+ __SAVE_ALL __KERNEL_DS
15112+ pax_enter_kernel
15113+#else
15114+ __SAVE_ALL __USER_DS
15115+#endif
15116+.endm
15117+
15118 .macro RESTORE_INT_REGS
15119 popl_cfi %ebx
15120 CFI_RESTORE ebx
15121@@ -296,7 +445,7 @@ ENTRY(ret_from_fork)
15122 popfl_cfi
15123 jmp syscall_exit
15124 CFI_ENDPROC
15125-END(ret_from_fork)
15126+ENDPROC(ret_from_fork)
15127
15128 /*
15129 * Interrupt exit functions should be protected against kprobes
15130@@ -329,7 +478,15 @@ ret_from_intr:
15131 andl $SEGMENT_RPL_MASK, %eax
15132 #endif
15133 cmpl $USER_RPL, %eax
15134+
15135+#ifdef CONFIG_PAX_KERNEXEC
15136+ jae resume_userspace
15137+
15138+ pax_exit_kernel
15139+ jmp resume_kernel
15140+#else
15141 jb resume_kernel # not returning to v8086 or userspace
15142+#endif
15143
15144 ENTRY(resume_userspace)
15145 LOCKDEP_SYS_EXIT
15146@@ -341,8 +498,8 @@ ENTRY(resume_userspace)
15147 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15148 # int/exception return?
15149 jne work_pending
15150- jmp restore_all
15151-END(ret_from_exception)
15152+ jmp restore_all_pax
15153+ENDPROC(ret_from_exception)
15154
15155 #ifdef CONFIG_PREEMPT
15156 ENTRY(resume_kernel)
15157@@ -357,7 +514,7 @@ need_resched:
15158 jz restore_all
15159 call preempt_schedule_irq
15160 jmp need_resched
15161-END(resume_kernel)
15162+ENDPROC(resume_kernel)
15163 #endif
15164 CFI_ENDPROC
15165 /*
15166@@ -391,28 +548,43 @@ sysenter_past_esp:
15167 /*CFI_REL_OFFSET cs, 0*/
15168 /*
15169 * Push current_thread_info()->sysenter_return to the stack.
15170- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15171- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15172 */
15173- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
15174+ pushl_cfi $0
15175 CFI_REL_OFFSET eip, 0
15176
15177 pushl_cfi %eax
15178 SAVE_ALL
15179+ GET_THREAD_INFO(%ebp)
15180+ movl TI_sysenter_return(%ebp),%ebp
15181+ movl %ebp,PT_EIP(%esp)
15182 ENABLE_INTERRUPTS(CLBR_NONE)
15183
15184 /*
15185 * Load the potential sixth argument from user stack.
15186 * Careful about security.
15187 */
15188+ movl PT_OLDESP(%esp),%ebp
15189+
15190+#ifdef CONFIG_PAX_MEMORY_UDEREF
15191+ mov PT_OLDSS(%esp),%ds
15192+1: movl %ds:(%ebp),%ebp
15193+ push %ss
15194+ pop %ds
15195+#else
15196 cmpl $__PAGE_OFFSET-3,%ebp
15197 jae syscall_fault
15198 1: movl (%ebp),%ebp
15199+#endif
15200+
15201 movl %ebp,PT_EBP(%esp)
15202 _ASM_EXTABLE(1b,syscall_fault)
15203
15204 GET_THREAD_INFO(%ebp)
15205
15206+#ifdef CONFIG_PAX_RANDKSTACK
15207+ pax_erase_kstack
15208+#endif
15209+
15210 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
15211 jnz sysenter_audit
15212 sysenter_do_call:
15213@@ -427,12 +599,24 @@ sysenter_do_call:
15214 testl $_TIF_ALLWORK_MASK, %ecx
15215 jne sysexit_audit
15216 sysenter_exit:
15217+
15218+#ifdef CONFIG_PAX_RANDKSTACK
15219+ pushl_cfi %eax
15220+ movl %esp, %eax
15221+ call pax_randomize_kstack
15222+ popl_cfi %eax
15223+#endif
15224+
15225+ pax_erase_kstack
15226+
15227 /* if something modifies registers it must also disable sysexit */
15228 movl PT_EIP(%esp), %edx
15229 movl PT_OLDESP(%esp), %ecx
15230 xorl %ebp,%ebp
15231 TRACE_IRQS_ON
15232 1: mov PT_FS(%esp), %fs
15233+2: mov PT_DS(%esp), %ds
15234+3: mov PT_ES(%esp), %es
15235 PTGS_TO_GS
15236 ENABLE_INTERRUPTS_SYSEXIT
15237
15238@@ -449,6 +633,9 @@ sysenter_audit:
15239 movl %eax,%edx /* 2nd arg: syscall number */
15240 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15241 call __audit_syscall_entry
15242+
15243+ pax_erase_kstack
15244+
15245 pushl_cfi %ebx
15246 movl PT_EAX(%esp),%eax /* reload syscall number */
15247 jmp sysenter_do_call
15248@@ -474,10 +661,16 @@ sysexit_audit:
15249
15250 CFI_ENDPROC
15251 .pushsection .fixup,"ax"
15252-2: movl $0,PT_FS(%esp)
15253+4: movl $0,PT_FS(%esp)
15254+ jmp 1b
15255+5: movl $0,PT_DS(%esp)
15256+ jmp 1b
15257+6: movl $0,PT_ES(%esp)
15258 jmp 1b
15259 .popsection
15260- _ASM_EXTABLE(1b,2b)
15261+ _ASM_EXTABLE(1b,4b)
15262+ _ASM_EXTABLE(2b,5b)
15263+ _ASM_EXTABLE(3b,6b)
15264 PTGS_TO_GS_EX
15265 ENDPROC(ia32_sysenter_target)
15266
15267@@ -491,6 +684,11 @@ ENTRY(system_call)
15268 pushl_cfi %eax # save orig_eax
15269 SAVE_ALL
15270 GET_THREAD_INFO(%ebp)
15271+
15272+#ifdef CONFIG_PAX_RANDKSTACK
15273+ pax_erase_kstack
15274+#endif
15275+
15276 # system call tracing in operation / emulation
15277 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
15278 jnz syscall_trace_entry
15279@@ -509,6 +707,15 @@ syscall_exit:
15280 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15281 jne syscall_exit_work
15282
15283+restore_all_pax:
15284+
15285+#ifdef CONFIG_PAX_RANDKSTACK
15286+ movl %esp, %eax
15287+ call pax_randomize_kstack
15288+#endif
15289+
15290+ pax_erase_kstack
15291+
15292 restore_all:
15293 TRACE_IRQS_IRET
15294 restore_all_notrace:
15295@@ -565,14 +772,34 @@ ldt_ss:
15296 * compensating for the offset by changing to the ESPFIX segment with
15297 * a base address that matches for the difference.
15298 */
15299-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15300+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15301 mov %esp, %edx /* load kernel esp */
15302 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15303 mov %dx, %ax /* eax: new kernel esp */
15304 sub %eax, %edx /* offset (low word is 0) */
15305+#ifdef CONFIG_SMP
15306+ movl PER_CPU_VAR(cpu_number), %ebx
15307+ shll $PAGE_SHIFT_asm, %ebx
15308+ addl $cpu_gdt_table, %ebx
15309+#else
15310+ movl $cpu_gdt_table, %ebx
15311+#endif
15312 shr $16, %edx
15313- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15314- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15315+
15316+#ifdef CONFIG_PAX_KERNEXEC
15317+ mov %cr0, %esi
15318+ btr $16, %esi
15319+ mov %esi, %cr0
15320+#endif
15321+
15322+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15323+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15324+
15325+#ifdef CONFIG_PAX_KERNEXEC
15326+ bts $16, %esi
15327+ mov %esi, %cr0
15328+#endif
15329+
15330 pushl_cfi $__ESPFIX_SS
15331 pushl_cfi %eax /* new kernel esp */
15332 /* Disable interrupts, but do not irqtrace this section: we
15333@@ -601,35 +828,23 @@ work_resched:
15334 movl TI_flags(%ebp), %ecx
15335 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15336 # than syscall tracing?
15337- jz restore_all
15338+ jz restore_all_pax
15339 testb $_TIF_NEED_RESCHED, %cl
15340 jnz work_resched
15341
15342 work_notifysig: # deal with pending signals and
15343 # notify-resume requests
15344+ movl %esp, %eax
15345 #ifdef CONFIG_VM86
15346 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15347- movl %esp, %eax
15348- jne work_notifysig_v86 # returning to kernel-space or
15349+ jz 1f # returning to kernel-space or
15350 # vm86-space
15351- TRACE_IRQS_ON
15352- ENABLE_INTERRUPTS(CLBR_NONE)
15353- movb PT_CS(%esp), %bl
15354- andb $SEGMENT_RPL_MASK, %bl
15355- cmpb $USER_RPL, %bl
15356- jb resume_kernel
15357- xorl %edx, %edx
15358- call do_notify_resume
15359- jmp resume_userspace
15360
15361- ALIGN
15362-work_notifysig_v86:
15363 pushl_cfi %ecx # save ti_flags for do_notify_resume
15364 call save_v86_state # %eax contains pt_regs pointer
15365 popl_cfi %ecx
15366 movl %eax, %esp
15367-#else
15368- movl %esp, %eax
15369+1:
15370 #endif
15371 TRACE_IRQS_ON
15372 ENABLE_INTERRUPTS(CLBR_NONE)
15373@@ -640,7 +855,7 @@ work_notifysig_v86:
15374 xorl %edx, %edx
15375 call do_notify_resume
15376 jmp resume_userspace
15377-END(work_pending)
15378+ENDPROC(work_pending)
15379
15380 # perform syscall exit tracing
15381 ALIGN
15382@@ -648,11 +863,14 @@ syscall_trace_entry:
15383 movl $-ENOSYS,PT_EAX(%esp)
15384 movl %esp, %eax
15385 call syscall_trace_enter
15386+
15387+ pax_erase_kstack
15388+
15389 /* What it returned is what we'll actually use. */
15390 cmpl $(NR_syscalls), %eax
15391 jnae syscall_call
15392 jmp syscall_exit
15393-END(syscall_trace_entry)
15394+ENDPROC(syscall_trace_entry)
15395
15396 # perform syscall exit tracing
15397 ALIGN
15398@@ -665,20 +883,24 @@ syscall_exit_work:
15399 movl %esp, %eax
15400 call syscall_trace_leave
15401 jmp resume_userspace
15402-END(syscall_exit_work)
15403+ENDPROC(syscall_exit_work)
15404 CFI_ENDPROC
15405
15406 RING0_INT_FRAME # can't unwind into user space anyway
15407 syscall_fault:
15408+#ifdef CONFIG_PAX_MEMORY_UDEREF
15409+ push %ss
15410+ pop %ds
15411+#endif
15412 GET_THREAD_INFO(%ebp)
15413 movl $-EFAULT,PT_EAX(%esp)
15414 jmp resume_userspace
15415-END(syscall_fault)
15416+ENDPROC(syscall_fault)
15417
15418 syscall_badsys:
15419 movl $-ENOSYS,PT_EAX(%esp)
15420 jmp resume_userspace
15421-END(syscall_badsys)
15422+ENDPROC(syscall_badsys)
15423 CFI_ENDPROC
15424 /*
15425 * End of kprobes section
15426@@ -750,6 +972,36 @@ ENTRY(ptregs_clone)
15427 CFI_ENDPROC
15428 ENDPROC(ptregs_clone)
15429
15430+ ALIGN;
15431+ENTRY(kernel_execve)
15432+ CFI_STARTPROC
15433+ pushl_cfi %ebp
15434+ sub $PT_OLDSS+4,%esp
15435+ pushl_cfi %edi
15436+ pushl_cfi %ecx
15437+ pushl_cfi %eax
15438+ lea 3*4(%esp),%edi
15439+ mov $PT_OLDSS/4+1,%ecx
15440+ xorl %eax,%eax
15441+ rep stosl
15442+ popl_cfi %eax
15443+ popl_cfi %ecx
15444+ popl_cfi %edi
15445+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15446+ pushl_cfi %esp
15447+ call sys_execve
15448+ add $4,%esp
15449+ CFI_ADJUST_CFA_OFFSET -4
15450+ GET_THREAD_INFO(%ebp)
15451+ test %eax,%eax
15452+ jz syscall_exit
15453+ add $PT_OLDSS+4,%esp
15454+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15455+ popl_cfi %ebp
15456+ ret
15457+ CFI_ENDPROC
15458+ENDPROC(kernel_execve)
15459+
15460 .macro FIXUP_ESPFIX_STACK
15461 /*
15462 * Switch back for ESPFIX stack to the normal zerobased stack
15463@@ -759,8 +1011,15 @@ ENDPROC(ptregs_clone)
15464 * normal stack and adjusts ESP with the matching offset.
15465 */
15466 /* fixup the stack */
15467- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15468- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
15469+#ifdef CONFIG_SMP
15470+ movl PER_CPU_VAR(cpu_number), %ebx
15471+ shll $PAGE_SHIFT_asm, %ebx
15472+ addl $cpu_gdt_table, %ebx
15473+#else
15474+ movl $cpu_gdt_table, %ebx
15475+#endif
15476+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15477+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
15478 shl $16, %eax
15479 addl %esp, %eax /* the adjusted stack pointer */
15480 pushl_cfi $__KERNEL_DS
15481@@ -813,7 +1072,7 @@ vector=vector+1
15482 .endr
15483 2: jmp common_interrupt
15484 .endr
15485-END(irq_entries_start)
15486+ENDPROC(irq_entries_start)
15487
15488 .previous
15489 END(interrupt)
15490@@ -861,7 +1120,7 @@ ENTRY(coprocessor_error)
15491 pushl_cfi $do_coprocessor_error
15492 jmp error_code
15493 CFI_ENDPROC
15494-END(coprocessor_error)
15495+ENDPROC(coprocessor_error)
15496
15497 ENTRY(simd_coprocessor_error)
15498 RING0_INT_FRAME
15499@@ -882,7 +1141,7 @@ ENTRY(simd_coprocessor_error)
15500 #endif
15501 jmp error_code
15502 CFI_ENDPROC
15503-END(simd_coprocessor_error)
15504+ENDPROC(simd_coprocessor_error)
15505
15506 ENTRY(device_not_available)
15507 RING0_INT_FRAME
15508@@ -890,18 +1149,18 @@ ENTRY(device_not_available)
15509 pushl_cfi $do_device_not_available
15510 jmp error_code
15511 CFI_ENDPROC
15512-END(device_not_available)
15513+ENDPROC(device_not_available)
15514
15515 #ifdef CONFIG_PARAVIRT
15516 ENTRY(native_iret)
15517 iret
15518 _ASM_EXTABLE(native_iret, iret_exc)
15519-END(native_iret)
15520+ENDPROC(native_iret)
15521
15522 ENTRY(native_irq_enable_sysexit)
15523 sti
15524 sysexit
15525-END(native_irq_enable_sysexit)
15526+ENDPROC(native_irq_enable_sysexit)
15527 #endif
15528
15529 ENTRY(overflow)
15530@@ -910,7 +1169,7 @@ ENTRY(overflow)
15531 pushl_cfi $do_overflow
15532 jmp error_code
15533 CFI_ENDPROC
15534-END(overflow)
15535+ENDPROC(overflow)
15536
15537 ENTRY(bounds)
15538 RING0_INT_FRAME
15539@@ -918,7 +1177,7 @@ ENTRY(bounds)
15540 pushl_cfi $do_bounds
15541 jmp error_code
15542 CFI_ENDPROC
15543-END(bounds)
15544+ENDPROC(bounds)
15545
15546 ENTRY(invalid_op)
15547 RING0_INT_FRAME
15548@@ -926,7 +1185,7 @@ ENTRY(invalid_op)
15549 pushl_cfi $do_invalid_op
15550 jmp error_code
15551 CFI_ENDPROC
15552-END(invalid_op)
15553+ENDPROC(invalid_op)
15554
15555 ENTRY(coprocessor_segment_overrun)
15556 RING0_INT_FRAME
15557@@ -934,35 +1193,35 @@ ENTRY(coprocessor_segment_overrun)
15558 pushl_cfi $do_coprocessor_segment_overrun
15559 jmp error_code
15560 CFI_ENDPROC
15561-END(coprocessor_segment_overrun)
15562+ENDPROC(coprocessor_segment_overrun)
15563
15564 ENTRY(invalid_TSS)
15565 RING0_EC_FRAME
15566 pushl_cfi $do_invalid_TSS
15567 jmp error_code
15568 CFI_ENDPROC
15569-END(invalid_TSS)
15570+ENDPROC(invalid_TSS)
15571
15572 ENTRY(segment_not_present)
15573 RING0_EC_FRAME
15574 pushl_cfi $do_segment_not_present
15575 jmp error_code
15576 CFI_ENDPROC
15577-END(segment_not_present)
15578+ENDPROC(segment_not_present)
15579
15580 ENTRY(stack_segment)
15581 RING0_EC_FRAME
15582 pushl_cfi $do_stack_segment
15583 jmp error_code
15584 CFI_ENDPROC
15585-END(stack_segment)
15586+ENDPROC(stack_segment)
15587
15588 ENTRY(alignment_check)
15589 RING0_EC_FRAME
15590 pushl_cfi $do_alignment_check
15591 jmp error_code
15592 CFI_ENDPROC
15593-END(alignment_check)
15594+ENDPROC(alignment_check)
15595
15596 ENTRY(divide_error)
15597 RING0_INT_FRAME
15598@@ -970,7 +1229,7 @@ ENTRY(divide_error)
15599 pushl_cfi $do_divide_error
15600 jmp error_code
15601 CFI_ENDPROC
15602-END(divide_error)
15603+ENDPROC(divide_error)
15604
15605 #ifdef CONFIG_X86_MCE
15606 ENTRY(machine_check)
15607@@ -979,7 +1238,7 @@ ENTRY(machine_check)
15608 pushl_cfi machine_check_vector
15609 jmp error_code
15610 CFI_ENDPROC
15611-END(machine_check)
15612+ENDPROC(machine_check)
15613 #endif
15614
15615 ENTRY(spurious_interrupt_bug)
15616@@ -988,7 +1247,7 @@ ENTRY(spurious_interrupt_bug)
15617 pushl_cfi $do_spurious_interrupt_bug
15618 jmp error_code
15619 CFI_ENDPROC
15620-END(spurious_interrupt_bug)
15621+ENDPROC(spurious_interrupt_bug)
15622 /*
15623 * End of kprobes section
15624 */
15625@@ -1100,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15626
15627 ENTRY(mcount)
15628 ret
15629-END(mcount)
15630+ENDPROC(mcount)
15631
15632 ENTRY(ftrace_caller)
15633 cmpl $0, function_trace_stop
15634@@ -1129,7 +1388,7 @@ ftrace_graph_call:
15635 .globl ftrace_stub
15636 ftrace_stub:
15637 ret
15638-END(ftrace_caller)
15639+ENDPROC(ftrace_caller)
15640
15641 #else /* ! CONFIG_DYNAMIC_FTRACE */
15642
15643@@ -1165,7 +1424,7 @@ trace:
15644 popl %ecx
15645 popl %eax
15646 jmp ftrace_stub
15647-END(mcount)
15648+ENDPROC(mcount)
15649 #endif /* CONFIG_DYNAMIC_FTRACE */
15650 #endif /* CONFIG_FUNCTION_TRACER */
15651
15652@@ -1186,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15653 popl %ecx
15654 popl %eax
15655 ret
15656-END(ftrace_graph_caller)
15657+ENDPROC(ftrace_graph_caller)
15658
15659 .globl return_to_handler
15660 return_to_handler:
15661@@ -1241,15 +1500,18 @@ error_code:
15662 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15663 REG_TO_PTGS %ecx
15664 SET_KERNEL_GS %ecx
15665- movl $(__USER_DS), %ecx
15666+ movl $(__KERNEL_DS), %ecx
15667 movl %ecx, %ds
15668 movl %ecx, %es
15669+
15670+ pax_enter_kernel
15671+
15672 TRACE_IRQS_OFF
15673 movl %esp,%eax # pt_regs pointer
15674 call *%edi
15675 jmp ret_from_exception
15676 CFI_ENDPROC
15677-END(page_fault)
15678+ENDPROC(page_fault)
15679
15680 /*
15681 * Debug traps and NMI can happen at the one SYSENTER instruction
15682@@ -1291,7 +1553,7 @@ debug_stack_correct:
15683 call do_debug
15684 jmp ret_from_exception
15685 CFI_ENDPROC
15686-END(debug)
15687+ENDPROC(debug)
15688
15689 /*
15690 * NMI is doubly nasty. It can happen _while_ we're handling
15691@@ -1328,6 +1590,9 @@ nmi_stack_correct:
15692 xorl %edx,%edx # zero error code
15693 movl %esp,%eax # pt_regs pointer
15694 call do_nmi
15695+
15696+ pax_exit_kernel
15697+
15698 jmp restore_all_notrace
15699 CFI_ENDPROC
15700
15701@@ -1364,12 +1629,15 @@ nmi_espfix_stack:
15702 FIXUP_ESPFIX_STACK # %eax == %esp
15703 xorl %edx,%edx # zero error code
15704 call do_nmi
15705+
15706+ pax_exit_kernel
15707+
15708 RESTORE_REGS
15709 lss 12+4(%esp), %esp # back to espfix stack
15710 CFI_ADJUST_CFA_OFFSET -24
15711 jmp irq_return
15712 CFI_ENDPROC
15713-END(nmi)
15714+ENDPROC(nmi)
15715
15716 ENTRY(int3)
15717 RING0_INT_FRAME
15718@@ -1381,14 +1649,14 @@ ENTRY(int3)
15719 call do_int3
15720 jmp ret_from_exception
15721 CFI_ENDPROC
15722-END(int3)
15723+ENDPROC(int3)
15724
15725 ENTRY(general_protection)
15726 RING0_EC_FRAME
15727 pushl_cfi $do_general_protection
15728 jmp error_code
15729 CFI_ENDPROC
15730-END(general_protection)
15731+ENDPROC(general_protection)
15732
15733 #ifdef CONFIG_KVM_GUEST
15734 ENTRY(async_page_fault)
15735@@ -1396,7 +1664,7 @@ ENTRY(async_page_fault)
15736 pushl_cfi $do_async_page_fault
15737 jmp error_code
15738 CFI_ENDPROC
15739-END(async_page_fault)
15740+ENDPROC(async_page_fault)
15741 #endif
15742
15743 /*
15744diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15745index 7d65133..c888d5f 100644
15746--- a/arch/x86/kernel/entry_64.S
15747+++ b/arch/x86/kernel/entry_64.S
15748@@ -57,6 +57,8 @@
15749 #include <asm/percpu.h>
15750 #include <asm/asm.h>
15751 #include <linux/err.h>
15752+#include <asm/pgtable.h>
15753+#include <asm/alternative-asm.h>
15754
15755 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15756 #include <linux/elf-em.h>
15757@@ -70,8 +72,9 @@
15758 #ifdef CONFIG_FUNCTION_TRACER
15759 #ifdef CONFIG_DYNAMIC_FTRACE
15760 ENTRY(mcount)
15761+ pax_force_retaddr
15762 retq
15763-END(mcount)
15764+ENDPROC(mcount)
15765
15766 ENTRY(ftrace_caller)
15767 cmpl $0, function_trace_stop
15768@@ -94,8 +97,9 @@ GLOBAL(ftrace_graph_call)
15769 #endif
15770
15771 GLOBAL(ftrace_stub)
15772+ pax_force_retaddr
15773 retq
15774-END(ftrace_caller)
15775+ENDPROC(ftrace_caller)
15776
15777 #else /* ! CONFIG_DYNAMIC_FTRACE */
15778 ENTRY(mcount)
15779@@ -114,6 +118,7 @@ ENTRY(mcount)
15780 #endif
15781
15782 GLOBAL(ftrace_stub)
15783+ pax_force_retaddr
15784 retq
15785
15786 trace:
15787@@ -123,12 +128,13 @@ trace:
15788 movq 8(%rbp), %rsi
15789 subq $MCOUNT_INSN_SIZE, %rdi
15790
15791+ pax_force_fptr ftrace_trace_function
15792 call *ftrace_trace_function
15793
15794 MCOUNT_RESTORE_FRAME
15795
15796 jmp ftrace_stub
15797-END(mcount)
15798+ENDPROC(mcount)
15799 #endif /* CONFIG_DYNAMIC_FTRACE */
15800 #endif /* CONFIG_FUNCTION_TRACER */
15801
15802@@ -148,8 +154,9 @@ ENTRY(ftrace_graph_caller)
15803
15804 MCOUNT_RESTORE_FRAME
15805
15806+ pax_force_retaddr
15807 retq
15808-END(ftrace_graph_caller)
15809+ENDPROC(ftrace_graph_caller)
15810
15811 GLOBAL(return_to_handler)
15812 subq $24, %rsp
15813@@ -165,6 +172,7 @@ GLOBAL(return_to_handler)
15814 movq 8(%rsp), %rdx
15815 movq (%rsp), %rax
15816 addq $24, %rsp
15817+ pax_force_fptr %rdi
15818 jmp *%rdi
15819 #endif
15820
15821@@ -180,6 +188,280 @@ ENTRY(native_usergs_sysret64)
15822 ENDPROC(native_usergs_sysret64)
15823 #endif /* CONFIG_PARAVIRT */
15824
15825+ .macro ljmpq sel, off
15826+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15827+ .byte 0x48; ljmp *1234f(%rip)
15828+ .pushsection .rodata
15829+ .align 16
15830+ 1234: .quad \off; .word \sel
15831+ .popsection
15832+#else
15833+ pushq $\sel
15834+ pushq $\off
15835+ lretq
15836+#endif
15837+ .endm
15838+
15839+ .macro pax_enter_kernel
15840+ pax_set_fptr_mask
15841+#ifdef CONFIG_PAX_KERNEXEC
15842+ call pax_enter_kernel
15843+#endif
15844+ .endm
15845+
15846+ .macro pax_exit_kernel
15847+#ifdef CONFIG_PAX_KERNEXEC
15848+ call pax_exit_kernel
15849+#endif
15850+ .endm
15851+
15852+#ifdef CONFIG_PAX_KERNEXEC
15853+ENTRY(pax_enter_kernel)
15854+ pushq %rdi
15855+
15856+#ifdef CONFIG_PARAVIRT
15857+ PV_SAVE_REGS(CLBR_RDI)
15858+#endif
15859+
15860+ GET_CR0_INTO_RDI
15861+ bts $16,%rdi
15862+ jnc 3f
15863+ mov %cs,%edi
15864+ cmp $__KERNEL_CS,%edi
15865+ jnz 2f
15866+1:
15867+
15868+#ifdef CONFIG_PARAVIRT
15869+ PV_RESTORE_REGS(CLBR_RDI)
15870+#endif
15871+
15872+ popq %rdi
15873+ pax_force_retaddr
15874+ retq
15875+
15876+2: ljmpq __KERNEL_CS,1f
15877+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15878+4: SET_RDI_INTO_CR0
15879+ jmp 1b
15880+ENDPROC(pax_enter_kernel)
15881+
15882+ENTRY(pax_exit_kernel)
15883+ pushq %rdi
15884+
15885+#ifdef CONFIG_PARAVIRT
15886+ PV_SAVE_REGS(CLBR_RDI)
15887+#endif
15888+
15889+ mov %cs,%rdi
15890+ cmp $__KERNEXEC_KERNEL_CS,%edi
15891+ jz 2f
15892+1:
15893+
15894+#ifdef CONFIG_PARAVIRT
15895+ PV_RESTORE_REGS(CLBR_RDI);
15896+#endif
15897+
15898+ popq %rdi
15899+ pax_force_retaddr
15900+ retq
15901+
15902+2: GET_CR0_INTO_RDI
15903+ btr $16,%rdi
15904+ ljmpq __KERNEL_CS,3f
15905+3: SET_RDI_INTO_CR0
15906+ jmp 1b
15907+#ifdef CONFIG_PARAVIRT
15908+ PV_RESTORE_REGS(CLBR_RDI);
15909+#endif
15910+
15911+ popq %rdi
15912+ pax_force_retaddr
15913+ retq
15914+ENDPROC(pax_exit_kernel)
15915+#endif
15916+
15917+ .macro pax_enter_kernel_user
15918+ pax_set_fptr_mask
15919+#ifdef CONFIG_PAX_MEMORY_UDEREF
15920+ call pax_enter_kernel_user
15921+#endif
15922+ .endm
15923+
15924+ .macro pax_exit_kernel_user
15925+#ifdef CONFIG_PAX_MEMORY_UDEREF
15926+ call pax_exit_kernel_user
15927+#endif
15928+#ifdef CONFIG_PAX_RANDKSTACK
15929+ pushq %rax
15930+ call pax_randomize_kstack
15931+ popq %rax
15932+#endif
15933+ .endm
15934+
15935+#ifdef CONFIG_PAX_MEMORY_UDEREF
15936+ENTRY(pax_enter_kernel_user)
15937+ pushq %rdi
15938+ pushq %rbx
15939+
15940+#ifdef CONFIG_PARAVIRT
15941+ PV_SAVE_REGS(CLBR_RDI)
15942+#endif
15943+
15944+ GET_CR3_INTO_RDI
15945+ mov %rdi,%rbx
15946+ add $__START_KERNEL_map,%rbx
15947+ sub phys_base(%rip),%rbx
15948+
15949+#ifdef CONFIG_PARAVIRT
15950+ pushq %rdi
15951+ cmpl $0, pv_info+PARAVIRT_enabled
15952+ jz 1f
15953+ i = 0
15954+ .rept USER_PGD_PTRS
15955+ mov i*8(%rbx),%rsi
15956+ mov $0,%sil
15957+ lea i*8(%rbx),%rdi
15958+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15959+ i = i + 1
15960+ .endr
15961+ jmp 2f
15962+1:
15963+#endif
15964+
15965+ i = 0
15966+ .rept USER_PGD_PTRS
15967+ movb $0,i*8(%rbx)
15968+ i = i + 1
15969+ .endr
15970+
15971+#ifdef CONFIG_PARAVIRT
15972+2: popq %rdi
15973+#endif
15974+ SET_RDI_INTO_CR3
15975+
15976+#ifdef CONFIG_PAX_KERNEXEC
15977+ GET_CR0_INTO_RDI
15978+ bts $16,%rdi
15979+ SET_RDI_INTO_CR0
15980+#endif
15981+
15982+#ifdef CONFIG_PARAVIRT
15983+ PV_RESTORE_REGS(CLBR_RDI)
15984+#endif
15985+
15986+ popq %rbx
15987+ popq %rdi
15988+ pax_force_retaddr
15989+ retq
15990+ENDPROC(pax_enter_kernel_user)
15991+
15992+ENTRY(pax_exit_kernel_user)
15993+ push %rdi
15994+
15995+#ifdef CONFIG_PARAVIRT
15996+ pushq %rbx
15997+ PV_SAVE_REGS(CLBR_RDI)
15998+#endif
15999+
16000+#ifdef CONFIG_PAX_KERNEXEC
16001+ GET_CR0_INTO_RDI
16002+ btr $16,%rdi
16003+ SET_RDI_INTO_CR0
16004+#endif
16005+
16006+ GET_CR3_INTO_RDI
16007+ add $__START_KERNEL_map,%rdi
16008+ sub phys_base(%rip),%rdi
16009+
16010+#ifdef CONFIG_PARAVIRT
16011+ cmpl $0, pv_info+PARAVIRT_enabled
16012+ jz 1f
16013+ mov %rdi,%rbx
16014+ i = 0
16015+ .rept USER_PGD_PTRS
16016+ mov i*8(%rbx),%rsi
16017+ mov $0x67,%sil
16018+ lea i*8(%rbx),%rdi
16019+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16020+ i = i + 1
16021+ .endr
16022+ jmp 2f
16023+1:
16024+#endif
16025+
16026+ i = 0
16027+ .rept USER_PGD_PTRS
16028+ movb $0x67,i*8(%rdi)
16029+ i = i + 1
16030+ .endr
16031+
16032+#ifdef CONFIG_PARAVIRT
16033+2: PV_RESTORE_REGS(CLBR_RDI)
16034+ popq %rbx
16035+#endif
16036+
16037+ popq %rdi
16038+ pax_force_retaddr
16039+ retq
16040+ENDPROC(pax_exit_kernel_user)
16041+#endif
16042+
16043+.macro pax_erase_kstack
16044+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16045+ call pax_erase_kstack
16046+#endif
16047+.endm
16048+
16049+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16050+ENTRY(pax_erase_kstack)
16051+ pushq %rdi
16052+ pushq %rcx
16053+ pushq %rax
16054+ pushq %r11
16055+
16056+ GET_THREAD_INFO(%r11)
16057+ mov TI_lowest_stack(%r11), %rdi
16058+ mov $-0xBEEF, %rax
16059+ std
16060+
16061+1: mov %edi, %ecx
16062+ and $THREAD_SIZE_asm - 1, %ecx
16063+ shr $3, %ecx
16064+ repne scasq
16065+ jecxz 2f
16066+
16067+ cmp $2*8, %ecx
16068+ jc 2f
16069+
16070+ mov $2*8, %ecx
16071+ repe scasq
16072+ jecxz 2f
16073+ jne 1b
16074+
16075+2: cld
16076+ mov %esp, %ecx
16077+ sub %edi, %ecx
16078+
16079+ cmp $THREAD_SIZE_asm, %rcx
16080+ jb 3f
16081+ ud2
16082+3:
16083+
16084+ shr $3, %ecx
16085+ rep stosq
16086+
16087+ mov TI_task_thread_sp0(%r11), %rdi
16088+ sub $256, %rdi
16089+ mov %rdi, TI_lowest_stack(%r11)
16090+
16091+ popq %r11
16092+ popq %rax
16093+ popq %rcx
16094+ popq %rdi
16095+ pax_force_retaddr
16096+ ret
16097+ENDPROC(pax_erase_kstack)
16098+#endif
16099
16100 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16101 #ifdef CONFIG_TRACE_IRQFLAGS
16102@@ -271,8 +553,8 @@ ENDPROC(native_usergs_sysret64)
16103 .endm
16104
16105 .macro UNFAKE_STACK_FRAME
16106- addq $8*6, %rsp
16107- CFI_ADJUST_CFA_OFFSET -(6*8)
16108+ addq $8*6 + ARG_SKIP, %rsp
16109+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16110 .endm
16111
16112 /*
16113@@ -359,7 +641,7 @@ ENDPROC(native_usergs_sysret64)
16114 movq %rsp, %rsi
16115
16116 leaq -RBP(%rsp),%rdi /* arg1 for handler */
16117- testl $3, CS-RBP(%rsi)
16118+ testb $3, CS-RBP(%rsi)
16119 je 1f
16120 SWAPGS
16121 /*
16122@@ -394,9 +676,10 @@ ENTRY(save_rest)
16123 movq_cfi r15, R15+16
16124 movq %r11, 8(%rsp) /* return address */
16125 FIXUP_TOP_OF_STACK %r11, 16
16126+ pax_force_retaddr
16127 ret
16128 CFI_ENDPROC
16129-END(save_rest)
16130+ENDPROC(save_rest)
16131
16132 /* save complete stack frame */
16133 .pushsection .kprobes.text, "ax"
16134@@ -425,9 +708,10 @@ ENTRY(save_paranoid)
16135 js 1f /* negative -> in kernel */
16136 SWAPGS
16137 xorl %ebx,%ebx
16138-1: ret
16139+1: pax_force_retaddr_bts
16140+ ret
16141 CFI_ENDPROC
16142-END(save_paranoid)
16143+ENDPROC(save_paranoid)
16144 .popsection
16145
16146 /*
16147@@ -449,7 +733,7 @@ ENTRY(ret_from_fork)
16148
16149 RESTORE_REST
16150
16151- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16152+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16153 jz retint_restore_args
16154
16155 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16156@@ -459,7 +743,7 @@ ENTRY(ret_from_fork)
16157 jmp ret_from_sys_call # go to the SYSRET fastpath
16158
16159 CFI_ENDPROC
16160-END(ret_from_fork)
16161+ENDPROC(ret_from_fork)
16162
16163 /*
16164 * System call entry. Up to 6 arguments in registers are supported.
16165@@ -495,7 +779,7 @@ END(ret_from_fork)
16166 ENTRY(system_call)
16167 CFI_STARTPROC simple
16168 CFI_SIGNAL_FRAME
16169- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16170+ CFI_DEF_CFA rsp,0
16171 CFI_REGISTER rip,rcx
16172 /*CFI_REGISTER rflags,r11*/
16173 SWAPGS_UNSAFE_STACK
16174@@ -508,16 +792,23 @@ GLOBAL(system_call_after_swapgs)
16175
16176 movq %rsp,PER_CPU_VAR(old_rsp)
16177 movq PER_CPU_VAR(kernel_stack),%rsp
16178+ SAVE_ARGS 8*6,0
16179+ pax_enter_kernel_user
16180+
16181+#ifdef CONFIG_PAX_RANDKSTACK
16182+ pax_erase_kstack
16183+#endif
16184+
16185 /*
16186 * No need to follow this irqs off/on section - it's straight
16187 * and short:
16188 */
16189 ENABLE_INTERRUPTS(CLBR_NONE)
16190- SAVE_ARGS 8,0
16191 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16192 movq %rcx,RIP-ARGOFFSET(%rsp)
16193 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16194- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16195+ GET_THREAD_INFO(%rcx)
16196+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16197 jnz tracesys
16198 system_call_fastpath:
16199 #if __SYSCALL_MASK == ~0
16200@@ -527,7 +818,7 @@ system_call_fastpath:
16201 cmpl $__NR_syscall_max,%eax
16202 #endif
16203 ja badsys
16204- movq %r10,%rcx
16205+ movq R10-ARGOFFSET(%rsp),%rcx
16206 call *sys_call_table(,%rax,8) # XXX: rip relative
16207 movq %rax,RAX-ARGOFFSET(%rsp)
16208 /*
16209@@ -541,10 +832,13 @@ sysret_check:
16210 LOCKDEP_SYS_EXIT
16211 DISABLE_INTERRUPTS(CLBR_NONE)
16212 TRACE_IRQS_OFF
16213- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16214+ GET_THREAD_INFO(%rcx)
16215+ movl TI_flags(%rcx),%edx
16216 andl %edi,%edx
16217 jnz sysret_careful
16218 CFI_REMEMBER_STATE
16219+ pax_exit_kernel_user
16220+ pax_erase_kstack
16221 /*
16222 * sysretq will re-enable interrupts:
16223 */
16224@@ -596,14 +890,18 @@ badsys:
16225 * jump back to the normal fast path.
16226 */
16227 auditsys:
16228- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16229+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16230 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16231 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16232 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16233 movq %rax,%rsi /* 2nd arg: syscall number */
16234 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16235 call __audit_syscall_entry
16236+
16237+ pax_erase_kstack
16238+
16239 LOAD_ARGS 0 /* reload call-clobbered registers */
16240+ pax_set_fptr_mask
16241 jmp system_call_fastpath
16242
16243 /*
16244@@ -624,7 +922,7 @@ sysret_audit:
16245 /* Do syscall tracing */
16246 tracesys:
16247 #ifdef CONFIG_AUDITSYSCALL
16248- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16249+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16250 jz auditsys
16251 #endif
16252 SAVE_REST
16253@@ -632,12 +930,16 @@ tracesys:
16254 FIXUP_TOP_OF_STACK %rdi
16255 movq %rsp,%rdi
16256 call syscall_trace_enter
16257+
16258+ pax_erase_kstack
16259+
16260 /*
16261 * Reload arg registers from stack in case ptrace changed them.
16262 * We don't reload %rax because syscall_trace_enter() returned
16263 * the value it wants us to use in the table lookup.
16264 */
16265 LOAD_ARGS ARGOFFSET, 1
16266+ pax_set_fptr_mask
16267 RESTORE_REST
16268 #if __SYSCALL_MASK == ~0
16269 cmpq $__NR_syscall_max,%rax
16270@@ -646,7 +948,7 @@ tracesys:
16271 cmpl $__NR_syscall_max,%eax
16272 #endif
16273 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16274- movq %r10,%rcx /* fixup for C */
16275+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16276 call *sys_call_table(,%rax,8)
16277 movq %rax,RAX-ARGOFFSET(%rsp)
16278 /* Use IRET because user could have changed frame */
16279@@ -667,7 +969,9 @@ GLOBAL(int_with_check)
16280 andl %edi,%edx
16281 jnz int_careful
16282 andl $~TS_COMPAT,TI_status(%rcx)
16283- jmp retint_swapgs
16284+ pax_exit_kernel_user
16285+ pax_erase_kstack
16286+ jmp retint_swapgs_pax
16287
16288 /* Either reschedule or signal or syscall exit tracking needed. */
16289 /* First do a reschedule test. */
16290@@ -713,7 +1017,7 @@ int_restore_rest:
16291 TRACE_IRQS_OFF
16292 jmp int_with_check
16293 CFI_ENDPROC
16294-END(system_call)
16295+ENDPROC(system_call)
16296
16297 /*
16298 * Certain special system calls that need to save a complete full stack frame.
16299@@ -729,7 +1033,7 @@ ENTRY(\label)
16300 call \func
16301 jmp ptregscall_common
16302 CFI_ENDPROC
16303-END(\label)
16304+ENDPROC(\label)
16305 .endm
16306
16307 PTREGSCALL stub_clone, sys_clone, %r8
16308@@ -747,9 +1051,10 @@ ENTRY(ptregscall_common)
16309 movq_cfi_restore R12+8, r12
16310 movq_cfi_restore RBP+8, rbp
16311 movq_cfi_restore RBX+8, rbx
16312+ pax_force_retaddr
16313 ret $REST_SKIP /* pop extended registers */
16314 CFI_ENDPROC
16315-END(ptregscall_common)
16316+ENDPROC(ptregscall_common)
16317
16318 ENTRY(stub_execve)
16319 CFI_STARTPROC
16320@@ -764,7 +1069,7 @@ ENTRY(stub_execve)
16321 RESTORE_REST
16322 jmp int_ret_from_sys_call
16323 CFI_ENDPROC
16324-END(stub_execve)
16325+ENDPROC(stub_execve)
16326
16327 /*
16328 * sigreturn is special because it needs to restore all registers on return.
16329@@ -782,7 +1087,7 @@ ENTRY(stub_rt_sigreturn)
16330 RESTORE_REST
16331 jmp int_ret_from_sys_call
16332 CFI_ENDPROC
16333-END(stub_rt_sigreturn)
16334+ENDPROC(stub_rt_sigreturn)
16335
16336 #ifdef CONFIG_X86_X32_ABI
16337 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
16338@@ -851,7 +1156,7 @@ vector=vector+1
16339 2: jmp common_interrupt
16340 .endr
16341 CFI_ENDPROC
16342-END(irq_entries_start)
16343+ENDPROC(irq_entries_start)
16344
16345 .previous
16346 END(interrupt)
16347@@ -871,6 +1176,16 @@ END(interrupt)
16348 subq $ORIG_RAX-RBP, %rsp
16349 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
16350 SAVE_ARGS_IRQ
16351+#ifdef CONFIG_PAX_MEMORY_UDEREF
16352+ testb $3, CS(%rdi)
16353+ jnz 1f
16354+ pax_enter_kernel
16355+ jmp 2f
16356+1: pax_enter_kernel_user
16357+2:
16358+#else
16359+ pax_enter_kernel
16360+#endif
16361 call \func
16362 .endm
16363
16364@@ -902,7 +1217,7 @@ ret_from_intr:
16365
16366 exit_intr:
16367 GET_THREAD_INFO(%rcx)
16368- testl $3,CS-ARGOFFSET(%rsp)
16369+ testb $3,CS-ARGOFFSET(%rsp)
16370 je retint_kernel
16371
16372 /* Interrupt came from user space */
16373@@ -924,12 +1239,16 @@ retint_swapgs: /* return to user-space */
16374 * The iretq could re-enable interrupts:
16375 */
16376 DISABLE_INTERRUPTS(CLBR_ANY)
16377+ pax_exit_kernel_user
16378+retint_swapgs_pax:
16379 TRACE_IRQS_IRETQ
16380 SWAPGS
16381 jmp restore_args
16382
16383 retint_restore_args: /* return to kernel space */
16384 DISABLE_INTERRUPTS(CLBR_ANY)
16385+ pax_exit_kernel
16386+ pax_force_retaddr RIP-ARGOFFSET
16387 /*
16388 * The iretq could re-enable interrupts:
16389 */
16390@@ -1012,7 +1331,7 @@ ENTRY(retint_kernel)
16391 #endif
16392
16393 CFI_ENDPROC
16394-END(common_interrupt)
16395+ENDPROC(common_interrupt)
16396 /*
16397 * End of kprobes section
16398 */
16399@@ -1029,7 +1348,7 @@ ENTRY(\sym)
16400 interrupt \do_sym
16401 jmp ret_from_intr
16402 CFI_ENDPROC
16403-END(\sym)
16404+ENDPROC(\sym)
16405 .endm
16406
16407 #ifdef CONFIG_SMP
16408@@ -1102,12 +1421,22 @@ ENTRY(\sym)
16409 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16410 call error_entry
16411 DEFAULT_FRAME 0
16412+#ifdef CONFIG_PAX_MEMORY_UDEREF
16413+ testb $3, CS(%rsp)
16414+ jnz 1f
16415+ pax_enter_kernel
16416+ jmp 2f
16417+1: pax_enter_kernel_user
16418+2:
16419+#else
16420+ pax_enter_kernel
16421+#endif
16422 movq %rsp,%rdi /* pt_regs pointer */
16423 xorl %esi,%esi /* no error code */
16424 call \do_sym
16425 jmp error_exit /* %ebx: no swapgs flag */
16426 CFI_ENDPROC
16427-END(\sym)
16428+ENDPROC(\sym)
16429 .endm
16430
16431 .macro paranoidzeroentry sym do_sym
16432@@ -1119,15 +1448,25 @@ ENTRY(\sym)
16433 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16434 call save_paranoid
16435 TRACE_IRQS_OFF
16436+#ifdef CONFIG_PAX_MEMORY_UDEREF
16437+ testb $3, CS(%rsp)
16438+ jnz 1f
16439+ pax_enter_kernel
16440+ jmp 2f
16441+1: pax_enter_kernel_user
16442+2:
16443+#else
16444+ pax_enter_kernel
16445+#endif
16446 movq %rsp,%rdi /* pt_regs pointer */
16447 xorl %esi,%esi /* no error code */
16448 call \do_sym
16449 jmp paranoid_exit /* %ebx: no swapgs flag */
16450 CFI_ENDPROC
16451-END(\sym)
16452+ENDPROC(\sym)
16453 .endm
16454
16455-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16456+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16457 .macro paranoidzeroentry_ist sym do_sym ist
16458 ENTRY(\sym)
16459 INTR_FRAME
16460@@ -1137,14 +1476,30 @@ ENTRY(\sym)
16461 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16462 call save_paranoid
16463 TRACE_IRQS_OFF_DEBUG
16464+#ifdef CONFIG_PAX_MEMORY_UDEREF
16465+ testb $3, CS(%rsp)
16466+ jnz 1f
16467+ pax_enter_kernel
16468+ jmp 2f
16469+1: pax_enter_kernel_user
16470+2:
16471+#else
16472+ pax_enter_kernel
16473+#endif
16474 movq %rsp,%rdi /* pt_regs pointer */
16475 xorl %esi,%esi /* no error code */
16476+#ifdef CONFIG_SMP
16477+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16478+ lea init_tss(%r12), %r12
16479+#else
16480+ lea init_tss(%rip), %r12
16481+#endif
16482 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16483 call \do_sym
16484 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16485 jmp paranoid_exit /* %ebx: no swapgs flag */
16486 CFI_ENDPROC
16487-END(\sym)
16488+ENDPROC(\sym)
16489 .endm
16490
16491 .macro errorentry sym do_sym
16492@@ -1155,13 +1510,23 @@ ENTRY(\sym)
16493 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16494 call error_entry
16495 DEFAULT_FRAME 0
16496+#ifdef CONFIG_PAX_MEMORY_UDEREF
16497+ testb $3, CS(%rsp)
16498+ jnz 1f
16499+ pax_enter_kernel
16500+ jmp 2f
16501+1: pax_enter_kernel_user
16502+2:
16503+#else
16504+ pax_enter_kernel
16505+#endif
16506 movq %rsp,%rdi /* pt_regs pointer */
16507 movq ORIG_RAX(%rsp),%rsi /* get error code */
16508 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16509 call \do_sym
16510 jmp error_exit /* %ebx: no swapgs flag */
16511 CFI_ENDPROC
16512-END(\sym)
16513+ENDPROC(\sym)
16514 .endm
16515
16516 /* error code is on the stack already */
16517@@ -1174,13 +1539,23 @@ ENTRY(\sym)
16518 call save_paranoid
16519 DEFAULT_FRAME 0
16520 TRACE_IRQS_OFF
16521+#ifdef CONFIG_PAX_MEMORY_UDEREF
16522+ testb $3, CS(%rsp)
16523+ jnz 1f
16524+ pax_enter_kernel
16525+ jmp 2f
16526+1: pax_enter_kernel_user
16527+2:
16528+#else
16529+ pax_enter_kernel
16530+#endif
16531 movq %rsp,%rdi /* pt_regs pointer */
16532 movq ORIG_RAX(%rsp),%rsi /* get error code */
16533 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16534 call \do_sym
16535 jmp paranoid_exit /* %ebx: no swapgs flag */
16536 CFI_ENDPROC
16537-END(\sym)
16538+ENDPROC(\sym)
16539 .endm
16540
16541 zeroentry divide_error do_divide_error
16542@@ -1210,9 +1585,10 @@ gs_change:
16543 2: mfence /* workaround */
16544 SWAPGS
16545 popfq_cfi
16546+ pax_force_retaddr
16547 ret
16548 CFI_ENDPROC
16549-END(native_load_gs_index)
16550+ENDPROC(native_load_gs_index)
16551
16552 _ASM_EXTABLE(gs_change,bad_gs)
16553 .section .fixup,"ax"
16554@@ -1231,13 +1607,14 @@ ENTRY(kernel_thread_helper)
16555 * Here we are in the child and the registers are set as they were
16556 * at kernel_thread() invocation in the parent.
16557 */
16558+ pax_force_fptr %rsi
16559 call *%rsi
16560 # exit
16561 mov %eax, %edi
16562 call do_exit
16563 ud2 # padding for call trace
16564 CFI_ENDPROC
16565-END(kernel_thread_helper)
16566+ENDPROC(kernel_thread_helper)
16567
16568 /*
16569 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16570@@ -1264,11 +1641,11 @@ ENTRY(kernel_execve)
16571 RESTORE_REST
16572 testq %rax,%rax
16573 je int_ret_from_sys_call
16574- RESTORE_ARGS
16575 UNFAKE_STACK_FRAME
16576+ pax_force_retaddr
16577 ret
16578 CFI_ENDPROC
16579-END(kernel_execve)
16580+ENDPROC(kernel_execve)
16581
16582 /* Call softirq on interrupt stack. Interrupts are off. */
16583 ENTRY(call_softirq)
16584@@ -1286,9 +1663,10 @@ ENTRY(call_softirq)
16585 CFI_DEF_CFA_REGISTER rsp
16586 CFI_ADJUST_CFA_OFFSET -8
16587 decl PER_CPU_VAR(irq_count)
16588+ pax_force_retaddr
16589 ret
16590 CFI_ENDPROC
16591-END(call_softirq)
16592+ENDPROC(call_softirq)
16593
16594 #ifdef CONFIG_XEN
16595 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16596@@ -1326,7 +1704,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16597 decl PER_CPU_VAR(irq_count)
16598 jmp error_exit
16599 CFI_ENDPROC
16600-END(xen_do_hypervisor_callback)
16601+ENDPROC(xen_do_hypervisor_callback)
16602
16603 /*
16604 * Hypervisor uses this for application faults while it executes.
16605@@ -1385,7 +1763,7 @@ ENTRY(xen_failsafe_callback)
16606 SAVE_ALL
16607 jmp error_exit
16608 CFI_ENDPROC
16609-END(xen_failsafe_callback)
16610+ENDPROC(xen_failsafe_callback)
16611
16612 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16613 xen_hvm_callback_vector xen_evtchn_do_upcall
16614@@ -1434,16 +1812,31 @@ ENTRY(paranoid_exit)
16615 TRACE_IRQS_OFF_DEBUG
16616 testl %ebx,%ebx /* swapgs needed? */
16617 jnz paranoid_restore
16618- testl $3,CS(%rsp)
16619+ testb $3,CS(%rsp)
16620 jnz paranoid_userspace
16621+#ifdef CONFIG_PAX_MEMORY_UDEREF
16622+ pax_exit_kernel
16623+ TRACE_IRQS_IRETQ 0
16624+ SWAPGS_UNSAFE_STACK
16625+ RESTORE_ALL 8
16626+ pax_force_retaddr_bts
16627+ jmp irq_return
16628+#endif
16629 paranoid_swapgs:
16630+#ifdef CONFIG_PAX_MEMORY_UDEREF
16631+ pax_exit_kernel_user
16632+#else
16633+ pax_exit_kernel
16634+#endif
16635 TRACE_IRQS_IRETQ 0
16636 SWAPGS_UNSAFE_STACK
16637 RESTORE_ALL 8
16638 jmp irq_return
16639 paranoid_restore:
16640+ pax_exit_kernel
16641 TRACE_IRQS_IRETQ_DEBUG 0
16642 RESTORE_ALL 8
16643+ pax_force_retaddr_bts
16644 jmp irq_return
16645 paranoid_userspace:
16646 GET_THREAD_INFO(%rcx)
16647@@ -1472,7 +1865,7 @@ paranoid_schedule:
16648 TRACE_IRQS_OFF
16649 jmp paranoid_userspace
16650 CFI_ENDPROC
16651-END(paranoid_exit)
16652+ENDPROC(paranoid_exit)
16653
16654 /*
16655 * Exception entry point. This expects an error code/orig_rax on the stack.
16656@@ -1499,12 +1892,13 @@ ENTRY(error_entry)
16657 movq_cfi r14, R14+8
16658 movq_cfi r15, R15+8
16659 xorl %ebx,%ebx
16660- testl $3,CS+8(%rsp)
16661+ testb $3,CS+8(%rsp)
16662 je error_kernelspace
16663 error_swapgs:
16664 SWAPGS
16665 error_sti:
16666 TRACE_IRQS_OFF
16667+ pax_force_retaddr_bts
16668 ret
16669
16670 /*
16671@@ -1531,7 +1925,7 @@ bstep_iret:
16672 movq %rcx,RIP+8(%rsp)
16673 jmp error_swapgs
16674 CFI_ENDPROC
16675-END(error_entry)
16676+ENDPROC(error_entry)
16677
16678
16679 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16680@@ -1551,7 +1945,7 @@ ENTRY(error_exit)
16681 jnz retint_careful
16682 jmp retint_swapgs
16683 CFI_ENDPROC
16684-END(error_exit)
16685+ENDPROC(error_exit)
16686
16687 /*
16688 * Test if a given stack is an NMI stack or not.
16689@@ -1609,9 +2003,11 @@ ENTRY(nmi)
16690 * If %cs was not the kernel segment, then the NMI triggered in user
16691 * space, which means it is definitely not nested.
16692 */
16693+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16694+ je 1f
16695 cmpl $__KERNEL_CS, 16(%rsp)
16696 jne first_nmi
16697-
16698+1:
16699 /*
16700 * Check the special variable on the stack to see if NMIs are
16701 * executing.
16702@@ -1758,6 +2154,16 @@ end_repeat_nmi:
16703 */
16704 call save_paranoid
16705 DEFAULT_FRAME 0
16706+#ifdef CONFIG_PAX_MEMORY_UDEREF
16707+ testb $3, CS(%rsp)
16708+ jnz 1f
16709+ pax_enter_kernel
16710+ jmp 2f
16711+1: pax_enter_kernel_user
16712+2:
16713+#else
16714+ pax_enter_kernel
16715+#endif
16716 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16717 movq %rsp,%rdi
16718 movq $-1,%rsi
16719@@ -1765,21 +2171,32 @@ end_repeat_nmi:
16720 testl %ebx,%ebx /* swapgs needed? */
16721 jnz nmi_restore
16722 nmi_swapgs:
16723+#ifdef CONFIG_PAX_MEMORY_UDEREF
16724+ pax_exit_kernel_user
16725+#else
16726+ pax_exit_kernel
16727+#endif
16728 SWAPGS_UNSAFE_STACK
16729+ RESTORE_ALL 8
16730+ /* Clear the NMI executing stack variable */
16731+ movq $0, 10*8(%rsp)
16732+ jmp irq_return
16733 nmi_restore:
16734+ pax_exit_kernel
16735 RESTORE_ALL 8
16736+ pax_force_retaddr_bts
16737 /* Clear the NMI executing stack variable */
16738 movq $0, 10*8(%rsp)
16739 jmp irq_return
16740 CFI_ENDPROC
16741-END(nmi)
16742+ENDPROC(nmi)
16743
16744 ENTRY(ignore_sysret)
16745 CFI_STARTPROC
16746 mov $-ENOSYS,%eax
16747 sysret
16748 CFI_ENDPROC
16749-END(ignore_sysret)
16750+ENDPROC(ignore_sysret)
16751
16752 /*
16753 * End of kprobes section
16754diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16755index c3a7cb4..3ad00dc 100644
16756--- a/arch/x86/kernel/ftrace.c
16757+++ b/arch/x86/kernel/ftrace.c
16758@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
16759 {
16760 unsigned char replaced[MCOUNT_INSN_SIZE];
16761
16762+ ip = ktla_ktva(ip);
16763+
16764 /*
16765 * Note: Due to modules and __init, code can
16766 * disappear and change, we need to protect against faulting
16767@@ -212,7 +214,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16768 unsigned char old[MCOUNT_INSN_SIZE], *new;
16769 int ret;
16770
16771- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16772+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16773 new = ftrace_call_replace(ip, (unsigned long)func);
16774
16775 /* See comment above by declaration of modifying_ftrace_code */
16776@@ -605,6 +607,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16777 {
16778 unsigned char code[MCOUNT_INSN_SIZE];
16779
16780+ ip = ktla_ktva(ip);
16781+
16782 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16783 return -EFAULT;
16784
16785diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16786index c18f59d..9c0c9f6 100644
16787--- a/arch/x86/kernel/head32.c
16788+++ b/arch/x86/kernel/head32.c
16789@@ -18,6 +18,7 @@
16790 #include <asm/io_apic.h>
16791 #include <asm/bios_ebda.h>
16792 #include <asm/tlbflush.h>
16793+#include <asm/boot.h>
16794
16795 static void __init i386_default_early_setup(void)
16796 {
16797@@ -30,8 +31,7 @@ static void __init i386_default_early_setup(void)
16798
16799 void __init i386_start_kernel(void)
16800 {
16801- memblock_reserve(__pa_symbol(&_text),
16802- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16803+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16804
16805 #ifdef CONFIG_BLK_DEV_INITRD
16806 /* Reserve INITRD */
16807diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16808index d42ab17..cb1b997 100644
16809--- a/arch/x86/kernel/head_32.S
16810+++ b/arch/x86/kernel/head_32.S
16811@@ -26,6 +26,12 @@
16812 /* Physical address */
16813 #define pa(X) ((X) - __PAGE_OFFSET)
16814
16815+#ifdef CONFIG_PAX_KERNEXEC
16816+#define ta(X) (X)
16817+#else
16818+#define ta(X) ((X) - __PAGE_OFFSET)
16819+#endif
16820+
16821 /*
16822 * References to members of the new_cpu_data structure.
16823 */
16824@@ -55,11 +61,7 @@
16825 * and small than max_low_pfn, otherwise will waste some page table entries
16826 */
16827
16828-#if PTRS_PER_PMD > 1
16829-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16830-#else
16831-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16832-#endif
16833+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16834
16835 /* Number of possible pages in the lowmem region */
16836 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16837@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16838 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16839
16840 /*
16841+ * Real beginning of normal "text" segment
16842+ */
16843+ENTRY(stext)
16844+ENTRY(_stext)
16845+
16846+/*
16847 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16848 * %esi points to the real-mode code as a 32-bit pointer.
16849 * CS and DS must be 4 GB flat segments, but we don't depend on
16850@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16851 * can.
16852 */
16853 __HEAD
16854+
16855+#ifdef CONFIG_PAX_KERNEXEC
16856+ jmp startup_32
16857+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16858+.fill PAGE_SIZE-5,1,0xcc
16859+#endif
16860+
16861 ENTRY(startup_32)
16862 movl pa(stack_start),%ecx
16863
16864@@ -106,6 +121,57 @@ ENTRY(startup_32)
16865 2:
16866 leal -__PAGE_OFFSET(%ecx),%esp
16867
16868+#ifdef CONFIG_SMP
16869+ movl $pa(cpu_gdt_table),%edi
16870+ movl $__per_cpu_load,%eax
16871+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16872+ rorl $16,%eax
16873+ movb %al,__KERNEL_PERCPU + 4(%edi)
16874+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16875+ movl $__per_cpu_end - 1,%eax
16876+ subl $__per_cpu_start,%eax
16877+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16878+#endif
16879+
16880+#ifdef CONFIG_PAX_MEMORY_UDEREF
16881+ movl $NR_CPUS,%ecx
16882+ movl $pa(cpu_gdt_table),%edi
16883+1:
16884+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16885+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16886+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16887+ addl $PAGE_SIZE_asm,%edi
16888+ loop 1b
16889+#endif
16890+
16891+#ifdef CONFIG_PAX_KERNEXEC
16892+ movl $pa(boot_gdt),%edi
16893+ movl $__LOAD_PHYSICAL_ADDR,%eax
16894+ movw %ax,__BOOT_CS + 2(%edi)
16895+ rorl $16,%eax
16896+ movb %al,__BOOT_CS + 4(%edi)
16897+ movb %ah,__BOOT_CS + 7(%edi)
16898+ rorl $16,%eax
16899+
16900+ ljmp $(__BOOT_CS),$1f
16901+1:
16902+
16903+ movl $NR_CPUS,%ecx
16904+ movl $pa(cpu_gdt_table),%edi
16905+ addl $__PAGE_OFFSET,%eax
16906+1:
16907+ movw %ax,__KERNEL_CS + 2(%edi)
16908+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16909+ rorl $16,%eax
16910+ movb %al,__KERNEL_CS + 4(%edi)
16911+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16912+ movb %ah,__KERNEL_CS + 7(%edi)
16913+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16914+ rorl $16,%eax
16915+ addl $PAGE_SIZE_asm,%edi
16916+ loop 1b
16917+#endif
16918+
16919 /*
16920 * Clear BSS first so that there are no surprises...
16921 */
16922@@ -196,8 +262,11 @@ ENTRY(startup_32)
16923 movl %eax, pa(max_pfn_mapped)
16924
16925 /* Do early initialization of the fixmap area */
16926- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16927- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16928+#ifdef CONFIG_COMPAT_VDSO
16929+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16930+#else
16931+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16932+#endif
16933 #else /* Not PAE */
16934
16935 page_pde_offset = (__PAGE_OFFSET >> 20);
16936@@ -227,8 +296,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16937 movl %eax, pa(max_pfn_mapped)
16938
16939 /* Do early initialization of the fixmap area */
16940- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16941- movl %eax,pa(initial_page_table+0xffc)
16942+#ifdef CONFIG_COMPAT_VDSO
16943+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16944+#else
16945+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16946+#endif
16947 #endif
16948
16949 #ifdef CONFIG_PARAVIRT
16950@@ -242,9 +314,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16951 cmpl $num_subarch_entries, %eax
16952 jae bad_subarch
16953
16954- movl pa(subarch_entries)(,%eax,4), %eax
16955- subl $__PAGE_OFFSET, %eax
16956- jmp *%eax
16957+ jmp *pa(subarch_entries)(,%eax,4)
16958
16959 bad_subarch:
16960 WEAK(lguest_entry)
16961@@ -256,10 +326,10 @@ WEAK(xen_entry)
16962 __INITDATA
16963
16964 subarch_entries:
16965- .long default_entry /* normal x86/PC */
16966- .long lguest_entry /* lguest hypervisor */
16967- .long xen_entry /* Xen hypervisor */
16968- .long default_entry /* Moorestown MID */
16969+ .long ta(default_entry) /* normal x86/PC */
16970+ .long ta(lguest_entry) /* lguest hypervisor */
16971+ .long ta(xen_entry) /* Xen hypervisor */
16972+ .long ta(default_entry) /* Moorestown MID */
16973 num_subarch_entries = (. - subarch_entries) / 4
16974 .previous
16975 #else
16976@@ -310,6 +380,7 @@ default_entry:
16977 orl %edx,%eax
16978 movl %eax,%cr4
16979
16980+#ifdef CONFIG_X86_PAE
16981 testb $X86_CR4_PAE, %al # check if PAE is enabled
16982 jz 6f
16983
16984@@ -338,6 +409,9 @@ default_entry:
16985 /* Make changes effective */
16986 wrmsr
16987
16988+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16989+#endif
16990+
16991 6:
16992
16993 /*
16994@@ -436,14 +510,20 @@ is386: movl $2,%ecx # set MP
16995 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16996 movl %eax,%ss # after changing gdt.
16997
16998- movl $(__USER_DS),%eax # DS/ES contains default USER segment
16999+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17000 movl %eax,%ds
17001 movl %eax,%es
17002
17003 movl $(__KERNEL_PERCPU), %eax
17004 movl %eax,%fs # set this cpu's percpu
17005
17006+#ifdef CONFIG_CC_STACKPROTECTOR
17007 movl $(__KERNEL_STACK_CANARY),%eax
17008+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17009+ movl $(__USER_DS),%eax
17010+#else
17011+ xorl %eax,%eax
17012+#endif
17013 movl %eax,%gs
17014
17015 xorl %eax,%eax # Clear LDT
17016@@ -520,8 +600,11 @@ setup_once:
17017 * relocation. Manually set base address in stack canary
17018 * segment descriptor.
17019 */
17020- movl $gdt_page,%eax
17021+ movl $cpu_gdt_table,%eax
17022 movl $stack_canary,%ecx
17023+#ifdef CONFIG_SMP
17024+ addl $__per_cpu_load,%ecx
17025+#endif
17026 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17027 shrl $16, %ecx
17028 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17029@@ -552,7 +635,7 @@ ENDPROC(early_idt_handlers)
17030 /* This is global to keep gas from relaxing the jumps */
17031 ENTRY(early_idt_handler)
17032 cld
17033- cmpl $2,%ss:early_recursion_flag
17034+ cmpl $1,%ss:early_recursion_flag
17035 je hlt_loop
17036 incl %ss:early_recursion_flag
17037
17038@@ -590,8 +673,8 @@ ENTRY(early_idt_handler)
17039 pushl (20+6*4)(%esp) /* trapno */
17040 pushl $fault_msg
17041 call printk
17042-#endif
17043 call dump_stack
17044+#endif
17045 hlt_loop:
17046 hlt
17047 jmp hlt_loop
17048@@ -610,8 +693,11 @@ ENDPROC(early_idt_handler)
17049 /* This is the default interrupt "handler" :-) */
17050 ALIGN
17051 ignore_int:
17052- cld
17053 #ifdef CONFIG_PRINTK
17054+ cmpl $2,%ss:early_recursion_flag
17055+ je hlt_loop
17056+ incl %ss:early_recursion_flag
17057+ cld
17058 pushl %eax
17059 pushl %ecx
17060 pushl %edx
17061@@ -620,9 +706,6 @@ ignore_int:
17062 movl $(__KERNEL_DS),%eax
17063 movl %eax,%ds
17064 movl %eax,%es
17065- cmpl $2,early_recursion_flag
17066- je hlt_loop
17067- incl early_recursion_flag
17068 pushl 16(%esp)
17069 pushl 24(%esp)
17070 pushl 32(%esp)
17071@@ -656,29 +739,43 @@ ENTRY(setup_once_ref)
17072 /*
17073 * BSS section
17074 */
17075-__PAGE_ALIGNED_BSS
17076- .align PAGE_SIZE
17077 #ifdef CONFIG_X86_PAE
17078+.section .initial_pg_pmd,"a",@progbits
17079 initial_pg_pmd:
17080 .fill 1024*KPMDS,4,0
17081 #else
17082+.section .initial_page_table,"a",@progbits
17083 ENTRY(initial_page_table)
17084 .fill 1024,4,0
17085 #endif
17086+.section .initial_pg_fixmap,"a",@progbits
17087 initial_pg_fixmap:
17088 .fill 1024,4,0
17089+.section .empty_zero_page,"a",@progbits
17090 ENTRY(empty_zero_page)
17091 .fill 4096,1,0
17092+.section .swapper_pg_dir,"a",@progbits
17093 ENTRY(swapper_pg_dir)
17094+#ifdef CONFIG_X86_PAE
17095+ .fill 4,8,0
17096+#else
17097 .fill 1024,4,0
17098+#endif
17099+
17100+/*
17101+ * The IDT has to be page-aligned to simplify the Pentium
17102+ * F0 0F bug workaround.. We have a special link segment
17103+ * for this.
17104+ */
17105+.section .idt,"a",@progbits
17106+ENTRY(idt_table)
17107+ .fill 256,8,0
17108
17109 /*
17110 * This starts the data section.
17111 */
17112 #ifdef CONFIG_X86_PAE
17113-__PAGE_ALIGNED_DATA
17114- /* Page-aligned for the benefit of paravirt? */
17115- .align PAGE_SIZE
17116+.section .initial_page_table,"a",@progbits
17117 ENTRY(initial_page_table)
17118 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17119 # if KPMDS == 3
17120@@ -697,12 +794,20 @@ ENTRY(initial_page_table)
17121 # error "Kernel PMDs should be 1, 2 or 3"
17122 # endif
17123 .align PAGE_SIZE /* needs to be page-sized too */
17124+
17125+#ifdef CONFIG_PAX_PER_CPU_PGD
17126+ENTRY(cpu_pgd)
17127+ .rept NR_CPUS
17128+ .fill 4,8,0
17129+ .endr
17130+#endif
17131+
17132 #endif
17133
17134 .data
17135 .balign 4
17136 ENTRY(stack_start)
17137- .long init_thread_union+THREAD_SIZE
17138+ .long init_thread_union+THREAD_SIZE-8
17139
17140 __INITRODATA
17141 int_msg:
17142@@ -730,7 +835,7 @@ fault_msg:
17143 * segment size, and 32-bit linear address value:
17144 */
17145
17146- .data
17147+.section .rodata,"a",@progbits
17148 .globl boot_gdt_descr
17149 .globl idt_descr
17150
17151@@ -739,7 +844,7 @@ fault_msg:
17152 .word 0 # 32 bit align gdt_desc.address
17153 boot_gdt_descr:
17154 .word __BOOT_DS+7
17155- .long boot_gdt - __PAGE_OFFSET
17156+ .long pa(boot_gdt)
17157
17158 .word 0 # 32-bit align idt_desc.address
17159 idt_descr:
17160@@ -750,7 +855,7 @@ idt_descr:
17161 .word 0 # 32 bit align gdt_desc.address
17162 ENTRY(early_gdt_descr)
17163 .word GDT_ENTRIES*8-1
17164- .long gdt_page /* Overwritten for secondary CPUs */
17165+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17166
17167 /*
17168 * The boot_gdt must mirror the equivalent in setup.S and is
17169@@ -759,5 +864,65 @@ ENTRY(early_gdt_descr)
17170 .align L1_CACHE_BYTES
17171 ENTRY(boot_gdt)
17172 .fill GDT_ENTRY_BOOT_CS,8,0
17173- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17174- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17175+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17176+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17177+
17178+ .align PAGE_SIZE_asm
17179+ENTRY(cpu_gdt_table)
17180+ .rept NR_CPUS
17181+ .quad 0x0000000000000000 /* NULL descriptor */
17182+ .quad 0x0000000000000000 /* 0x0b reserved */
17183+ .quad 0x0000000000000000 /* 0x13 reserved */
17184+ .quad 0x0000000000000000 /* 0x1b reserved */
17185+
17186+#ifdef CONFIG_PAX_KERNEXEC
17187+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17188+#else
17189+ .quad 0x0000000000000000 /* 0x20 unused */
17190+#endif
17191+
17192+ .quad 0x0000000000000000 /* 0x28 unused */
17193+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17194+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17195+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17196+ .quad 0x0000000000000000 /* 0x4b reserved */
17197+ .quad 0x0000000000000000 /* 0x53 reserved */
17198+ .quad 0x0000000000000000 /* 0x5b reserved */
17199+
17200+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17201+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17202+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17203+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17204+
17205+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17206+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17207+
17208+ /*
17209+ * Segments used for calling PnP BIOS have byte granularity.
17210+ * The code segments and data segments have fixed 64k limits,
17211+ * the transfer segment sizes are set at run time.
17212+ */
17213+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17214+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17215+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17216+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17217+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17218+
17219+ /*
17220+ * The APM segments have byte granularity and their bases
17221+ * are set at run time. All have 64k limits.
17222+ */
17223+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17224+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17225+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17226+
17227+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17228+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17229+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17230+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17231+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17232+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17233+
17234+ /* Be sure this is zeroed to avoid false validations in Xen */
17235+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17236+ .endr
17237diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17238index 94bf9cc..400455a 100644
17239--- a/arch/x86/kernel/head_64.S
17240+++ b/arch/x86/kernel/head_64.S
17241@@ -20,6 +20,8 @@
17242 #include <asm/processor-flags.h>
17243 #include <asm/percpu.h>
17244 #include <asm/nops.h>
17245+#include <asm/cpufeature.h>
17246+#include <asm/alternative-asm.h>
17247
17248 #ifdef CONFIG_PARAVIRT
17249 #include <asm/asm-offsets.h>
17250@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17251 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17252 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17253 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17254+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17255+L3_VMALLOC_START = pud_index(VMALLOC_START)
17256+L4_VMALLOC_END = pgd_index(VMALLOC_END)
17257+L3_VMALLOC_END = pud_index(VMALLOC_END)
17258+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17259+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17260
17261 .text
17262 __HEAD
17263@@ -88,35 +96,23 @@ startup_64:
17264 */
17265 addq %rbp, init_level4_pgt + 0(%rip)
17266 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17267+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17268+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17269+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17270 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17271
17272 addq %rbp, level3_ident_pgt + 0(%rip)
17273+#ifndef CONFIG_XEN
17274+ addq %rbp, level3_ident_pgt + 8(%rip)
17275+#endif
17276
17277- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17278- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17279+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17280+
17281+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17282+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17283
17284 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17285-
17286- /* Add an Identity mapping if I am above 1G */
17287- leaq _text(%rip), %rdi
17288- andq $PMD_PAGE_MASK, %rdi
17289-
17290- movq %rdi, %rax
17291- shrq $PUD_SHIFT, %rax
17292- andq $(PTRS_PER_PUD - 1), %rax
17293- jz ident_complete
17294-
17295- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17296- leaq level3_ident_pgt(%rip), %rbx
17297- movq %rdx, 0(%rbx, %rax, 8)
17298-
17299- movq %rdi, %rax
17300- shrq $PMD_SHIFT, %rax
17301- andq $(PTRS_PER_PMD - 1), %rax
17302- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17303- leaq level2_spare_pgt(%rip), %rbx
17304- movq %rdx, 0(%rbx, %rax, 8)
17305-ident_complete:
17306+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17307
17308 /*
17309 * Fixup the kernel text+data virtual addresses. Note that
17310@@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
17311 * after the boot processor executes this code.
17312 */
17313
17314- /* Enable PAE mode and PGE */
17315- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17316+ /* Enable PAE mode and PSE/PGE */
17317+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17318 movq %rax, %cr4
17319
17320 /* Setup early boot stage 4 level pagetables. */
17321@@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
17322 movl $MSR_EFER, %ecx
17323 rdmsr
17324 btsl $_EFER_SCE, %eax /* Enable System Call */
17325- btl $20,%edi /* No Execute supported? */
17326+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17327 jnc 1f
17328 btsl $_EFER_NX, %eax
17329+ leaq init_level4_pgt(%rip), %rdi
17330+#ifndef CONFIG_EFI
17331+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17332+#endif
17333+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17334+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17335+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17336+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17337 1: wrmsr /* Make changes effective */
17338
17339 /* Setup cr0 */
17340@@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
17341 * jump. In addition we need to ensure %cs is set so we make this
17342 * a far return.
17343 */
17344+ pax_set_fptr_mask
17345 movq initial_code(%rip),%rax
17346 pushq $0 # fake return address to stop unwinder
17347 pushq $__KERNEL_CS # set correct cs
17348@@ -268,7 +273,7 @@ ENTRY(secondary_startup_64)
17349 bad_address:
17350 jmp bad_address
17351
17352- .section ".init.text","ax"
17353+ __INIT
17354 .globl early_idt_handlers
17355 early_idt_handlers:
17356 # 104(%rsp) %rflags
17357@@ -347,11 +352,15 @@ ENTRY(early_idt_handler)
17358 addq $16,%rsp # drop vector number and error code
17359 decl early_recursion_flag(%rip)
17360 INTERRUPT_RETURN
17361+ .previous
17362
17363+ __INITDATA
17364 .balign 4
17365 early_recursion_flag:
17366 .long 0
17367+ .previous
17368
17369+ .section .rodata,"a",@progbits
17370 #ifdef CONFIG_EARLY_PRINTK
17371 early_idt_msg:
17372 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17373@@ -360,6 +369,7 @@ early_idt_ripmsg:
17374 #endif /* CONFIG_EARLY_PRINTK */
17375 .previous
17376
17377+ .section .rodata,"a",@progbits
17378 #define NEXT_PAGE(name) \
17379 .balign PAGE_SIZE; \
17380 ENTRY(name)
17381@@ -372,7 +382,6 @@ ENTRY(name)
17382 i = i + 1 ; \
17383 .endr
17384
17385- .data
17386 /*
17387 * This default setting generates an ident mapping at address 0x100000
17388 * and a mapping for the kernel that precisely maps virtual address
17389@@ -383,13 +392,41 @@ NEXT_PAGE(init_level4_pgt)
17390 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17391 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17392 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17393+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17394+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17395+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
17396+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17397+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17398+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17399 .org init_level4_pgt + L4_START_KERNEL*8, 0
17400 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17401 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17402
17403+#ifdef CONFIG_PAX_PER_CPU_PGD
17404+NEXT_PAGE(cpu_pgd)
17405+ .rept NR_CPUS
17406+ .fill 512,8,0
17407+ .endr
17408+#endif
17409+
17410 NEXT_PAGE(level3_ident_pgt)
17411 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17412+#ifdef CONFIG_XEN
17413 .fill 511,8,0
17414+#else
17415+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17416+ .fill 510,8,0
17417+#endif
17418+
17419+NEXT_PAGE(level3_vmalloc_start_pgt)
17420+ .fill 512,8,0
17421+
17422+NEXT_PAGE(level3_vmalloc_end_pgt)
17423+ .fill 512,8,0
17424+
17425+NEXT_PAGE(level3_vmemmap_pgt)
17426+ .fill L3_VMEMMAP_START,8,0
17427+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17428
17429 NEXT_PAGE(level3_kernel_pgt)
17430 .fill L3_START_KERNEL,8,0
17431@@ -397,20 +434,23 @@ NEXT_PAGE(level3_kernel_pgt)
17432 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17433 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17434
17435+NEXT_PAGE(level2_vmemmap_pgt)
17436+ .fill 512,8,0
17437+
17438 NEXT_PAGE(level2_fixmap_pgt)
17439- .fill 506,8,0
17440- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17441- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17442- .fill 5,8,0
17443+ .fill 507,8,0
17444+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17445+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17446+ .fill 4,8,0
17447
17448-NEXT_PAGE(level1_fixmap_pgt)
17449+NEXT_PAGE(level1_vsyscall_pgt)
17450 .fill 512,8,0
17451
17452-NEXT_PAGE(level2_ident_pgt)
17453- /* Since I easily can, map the first 1G.
17454+ /* Since I easily can, map the first 2G.
17455 * Don't set NX because code runs from these pages.
17456 */
17457- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17458+NEXT_PAGE(level2_ident_pgt)
17459+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17460
17461 NEXT_PAGE(level2_kernel_pgt)
17462 /*
17463@@ -423,37 +463,59 @@ NEXT_PAGE(level2_kernel_pgt)
17464 * If you want to increase this then increase MODULES_VADDR
17465 * too.)
17466 */
17467- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17468- KERNEL_IMAGE_SIZE/PMD_SIZE)
17469-
17470-NEXT_PAGE(level2_spare_pgt)
17471- .fill 512, 8, 0
17472+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17473
17474 #undef PMDS
17475 #undef NEXT_PAGE
17476
17477- .data
17478+ .align PAGE_SIZE
17479+ENTRY(cpu_gdt_table)
17480+ .rept NR_CPUS
17481+ .quad 0x0000000000000000 /* NULL descriptor */
17482+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17483+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17484+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17485+ .quad 0x00cffb000000ffff /* __USER32_CS */
17486+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17487+ .quad 0x00affb000000ffff /* __USER_CS */
17488+
17489+#ifdef CONFIG_PAX_KERNEXEC
17490+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17491+#else
17492+ .quad 0x0 /* unused */
17493+#endif
17494+
17495+ .quad 0,0 /* TSS */
17496+ .quad 0,0 /* LDT */
17497+ .quad 0,0,0 /* three TLS descriptors */
17498+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17499+ /* asm/segment.h:GDT_ENTRIES must match this */
17500+
17501+ /* zero the remaining page */
17502+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17503+ .endr
17504+
17505 .align 16
17506 .globl early_gdt_descr
17507 early_gdt_descr:
17508 .word GDT_ENTRIES*8-1
17509 early_gdt_descr_base:
17510- .quad INIT_PER_CPU_VAR(gdt_page)
17511+ .quad cpu_gdt_table
17512
17513 ENTRY(phys_base)
17514 /* This must match the first entry in level2_kernel_pgt */
17515 .quad 0x0000000000000000
17516
17517 #include "../../x86/xen/xen-head.S"
17518-
17519- .section .bss, "aw", @nobits
17520+
17521+ .section .rodata,"a",@progbits
17522 .align L1_CACHE_BYTES
17523 ENTRY(idt_table)
17524- .skip IDT_ENTRIES * 16
17525+ .fill 512,8,0
17526
17527 .align L1_CACHE_BYTES
17528 ENTRY(nmi_idt_table)
17529- .skip IDT_ENTRIES * 16
17530+ .fill 512,8,0
17531
17532 __PAGE_ALIGNED_BSS
17533 .align PAGE_SIZE
17534diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17535index 9c3bd4a..e1d9b35 100644
17536--- a/arch/x86/kernel/i386_ksyms_32.c
17537+++ b/arch/x86/kernel/i386_ksyms_32.c
17538@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17539 EXPORT_SYMBOL(cmpxchg8b_emu);
17540 #endif
17541
17542+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17543+
17544 /* Networking helper routines. */
17545 EXPORT_SYMBOL(csum_partial_copy_generic);
17546+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17547+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17548
17549 EXPORT_SYMBOL(__get_user_1);
17550 EXPORT_SYMBOL(__get_user_2);
17551@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17552
17553 EXPORT_SYMBOL(csum_partial);
17554 EXPORT_SYMBOL(empty_zero_page);
17555+
17556+#ifdef CONFIG_PAX_KERNEXEC
17557+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17558+#endif
17559diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17560index f250431..54097e7 100644
17561--- a/arch/x86/kernel/i387.c
17562+++ b/arch/x86/kernel/i387.c
17563@@ -59,7 +59,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
17564 static inline bool interrupted_user_mode(void)
17565 {
17566 struct pt_regs *regs = get_irq_regs();
17567- return regs && user_mode_vm(regs);
17568+ return regs && user_mode(regs);
17569 }
17570
17571 /*
17572diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17573index 36d1853..bf25736 100644
17574--- a/arch/x86/kernel/i8259.c
17575+++ b/arch/x86/kernel/i8259.c
17576@@ -209,7 +209,7 @@ spurious_8259A_irq:
17577 "spurious 8259A interrupt: IRQ%d.\n", irq);
17578 spurious_irq_mask |= irqmask;
17579 }
17580- atomic_inc(&irq_err_count);
17581+ atomic_inc_unchecked(&irq_err_count);
17582 /*
17583 * Theoretically we do not have to handle this IRQ,
17584 * but in Linux this does not cause problems and is
17585diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17586index 8c96897..be66bfa 100644
17587--- a/arch/x86/kernel/ioport.c
17588+++ b/arch/x86/kernel/ioport.c
17589@@ -6,6 +6,7 @@
17590 #include <linux/sched.h>
17591 #include <linux/kernel.h>
17592 #include <linux/capability.h>
17593+#include <linux/security.h>
17594 #include <linux/errno.h>
17595 #include <linux/types.h>
17596 #include <linux/ioport.h>
17597@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17598
17599 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17600 return -EINVAL;
17601+#ifdef CONFIG_GRKERNSEC_IO
17602+ if (turn_on && grsec_disable_privio) {
17603+ gr_handle_ioperm();
17604+ return -EPERM;
17605+ }
17606+#endif
17607 if (turn_on && !capable(CAP_SYS_RAWIO))
17608 return -EPERM;
17609
17610@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17611 * because the ->io_bitmap_max value must match the bitmap
17612 * contents:
17613 */
17614- tss = &per_cpu(init_tss, get_cpu());
17615+ tss = init_tss + get_cpu();
17616
17617 if (turn_on)
17618 bitmap_clear(t->io_bitmap_ptr, from, num);
17619@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17620 return -EINVAL;
17621 /* Trying to gain more privileges? */
17622 if (level > old) {
17623+#ifdef CONFIG_GRKERNSEC_IO
17624+ if (grsec_disable_privio) {
17625+ gr_handle_iopl();
17626+ return -EPERM;
17627+ }
17628+#endif
17629 if (!capable(CAP_SYS_RAWIO))
17630 return -EPERM;
17631 }
17632diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17633index 3dafc60..aa8e9c4 100644
17634--- a/arch/x86/kernel/irq.c
17635+++ b/arch/x86/kernel/irq.c
17636@@ -18,7 +18,7 @@
17637 #include <asm/mce.h>
17638 #include <asm/hw_irq.h>
17639
17640-atomic_t irq_err_count;
17641+atomic_unchecked_t irq_err_count;
17642
17643 /* Function pointer for generic interrupt vector handling */
17644 void (*x86_platform_ipi_callback)(void) = NULL;
17645@@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17646 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17647 seq_printf(p, " Machine check polls\n");
17648 #endif
17649- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17650+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17651 #if defined(CONFIG_X86_IO_APIC)
17652- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17653+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17654 #endif
17655 return 0;
17656 }
17657@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17658
17659 u64 arch_irq_stat(void)
17660 {
17661- u64 sum = atomic_read(&irq_err_count);
17662+ u64 sum = atomic_read_unchecked(&irq_err_count);
17663
17664 #ifdef CONFIG_X86_IO_APIC
17665- sum += atomic_read(&irq_mis_count);
17666+ sum += atomic_read_unchecked(&irq_mis_count);
17667 #endif
17668 return sum;
17669 }
17670diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17671index 344faf8..355f60d 100644
17672--- a/arch/x86/kernel/irq_32.c
17673+++ b/arch/x86/kernel/irq_32.c
17674@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17675 __asm__ __volatile__("andl %%esp,%0" :
17676 "=r" (sp) : "0" (THREAD_SIZE - 1));
17677
17678- return sp < (sizeof(struct thread_info) + STACK_WARN);
17679+ return sp < STACK_WARN;
17680 }
17681
17682 static void print_stack_overflow(void)
17683@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17684 * per-CPU IRQ handling contexts (thread information and stack)
17685 */
17686 union irq_ctx {
17687- struct thread_info tinfo;
17688- u32 stack[THREAD_SIZE/sizeof(u32)];
17689+ unsigned long previous_esp;
17690+ u32 stack[THREAD_SIZE/sizeof(u32)];
17691 } __attribute__((aligned(THREAD_SIZE)));
17692
17693 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17694@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17695 static inline int
17696 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17697 {
17698- union irq_ctx *curctx, *irqctx;
17699+ union irq_ctx *irqctx;
17700 u32 *isp, arg1, arg2;
17701
17702- curctx = (union irq_ctx *) current_thread_info();
17703 irqctx = __this_cpu_read(hardirq_ctx);
17704
17705 /*
17706@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17707 * handler) we can't do that and just have to keep using the
17708 * current stack (which is the irq stack already after all)
17709 */
17710- if (unlikely(curctx == irqctx))
17711+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17712 return 0;
17713
17714 /* build the stack frame on the IRQ stack */
17715- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17716- irqctx->tinfo.task = curctx->tinfo.task;
17717- irqctx->tinfo.previous_esp = current_stack_pointer;
17718+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17719+ irqctx->previous_esp = current_stack_pointer;
17720
17721- /* Copy the preempt_count so that the [soft]irq checks work. */
17722- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
17723+#ifdef CONFIG_PAX_MEMORY_UDEREF
17724+ __set_fs(MAKE_MM_SEG(0));
17725+#endif
17726
17727 if (unlikely(overflow))
17728 call_on_stack(print_stack_overflow, isp);
17729@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17730 : "0" (irq), "1" (desc), "2" (isp),
17731 "D" (desc->handle_irq)
17732 : "memory", "cc", "ecx");
17733+
17734+#ifdef CONFIG_PAX_MEMORY_UDEREF
17735+ __set_fs(current_thread_info()->addr_limit);
17736+#endif
17737+
17738 return 1;
17739 }
17740
17741@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17742 */
17743 void __cpuinit irq_ctx_init(int cpu)
17744 {
17745- union irq_ctx *irqctx;
17746-
17747 if (per_cpu(hardirq_ctx, cpu))
17748 return;
17749
17750- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17751- THREADINFO_GFP,
17752- THREAD_SIZE_ORDER));
17753- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17754- irqctx->tinfo.cpu = cpu;
17755- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17756- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17757-
17758- per_cpu(hardirq_ctx, cpu) = irqctx;
17759-
17760- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17761- THREADINFO_GFP,
17762- THREAD_SIZE_ORDER));
17763- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17764- irqctx->tinfo.cpu = cpu;
17765- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17766-
17767- per_cpu(softirq_ctx, cpu) = irqctx;
17768+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
17769+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
17770+
17771+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17772+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17773
17774 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17775 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17776@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
17777 asmlinkage void do_softirq(void)
17778 {
17779 unsigned long flags;
17780- struct thread_info *curctx;
17781 union irq_ctx *irqctx;
17782 u32 *isp;
17783
17784@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
17785 local_irq_save(flags);
17786
17787 if (local_softirq_pending()) {
17788- curctx = current_thread_info();
17789 irqctx = __this_cpu_read(softirq_ctx);
17790- irqctx->tinfo.task = curctx->task;
17791- irqctx->tinfo.previous_esp = current_stack_pointer;
17792+ irqctx->previous_esp = current_stack_pointer;
17793
17794 /* build the stack frame on the softirq stack */
17795- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17796+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17797+
17798+#ifdef CONFIG_PAX_MEMORY_UDEREF
17799+ __set_fs(MAKE_MM_SEG(0));
17800+#endif
17801
17802 call_on_stack(__do_softirq, isp);
17803+
17804+#ifdef CONFIG_PAX_MEMORY_UDEREF
17805+ __set_fs(current_thread_info()->addr_limit);
17806+#endif
17807+
17808 /*
17809 * Shouldn't happen, we returned above if in_interrupt():
17810 */
17811@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
17812 if (unlikely(!desc))
17813 return false;
17814
17815- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17816+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17817 if (unlikely(overflow))
17818 print_stack_overflow();
17819 desc->handle_irq(irq, desc);
17820diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17821index d04d3ec..ea4b374 100644
17822--- a/arch/x86/kernel/irq_64.c
17823+++ b/arch/x86/kernel/irq_64.c
17824@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17825 u64 estack_top, estack_bottom;
17826 u64 curbase = (u64)task_stack_page(current);
17827
17828- if (user_mode_vm(regs))
17829+ if (user_mode(regs))
17830 return;
17831
17832 if (regs->sp >= curbase + sizeof(struct thread_info) +
17833diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17834index 1d5d31e..72731d4 100644
17835--- a/arch/x86/kernel/kdebugfs.c
17836+++ b/arch/x86/kernel/kdebugfs.c
17837@@ -27,7 +27,7 @@ struct setup_data_node {
17838 u32 len;
17839 };
17840
17841-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17842+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
17843 size_t count, loff_t *ppos)
17844 {
17845 struct setup_data_node *node = file->private_data;
17846diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17847index 3f61904..873cea9 100644
17848--- a/arch/x86/kernel/kgdb.c
17849+++ b/arch/x86/kernel/kgdb.c
17850@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17851 #ifdef CONFIG_X86_32
17852 switch (regno) {
17853 case GDB_SS:
17854- if (!user_mode_vm(regs))
17855+ if (!user_mode(regs))
17856 *(unsigned long *)mem = __KERNEL_DS;
17857 break;
17858 case GDB_SP:
17859- if (!user_mode_vm(regs))
17860+ if (!user_mode(regs))
17861 *(unsigned long *)mem = kernel_stack_pointer(regs);
17862 break;
17863 case GDB_GS:
17864@@ -476,12 +476,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17865 case 'k':
17866 /* clear the trace bit */
17867 linux_regs->flags &= ~X86_EFLAGS_TF;
17868- atomic_set(&kgdb_cpu_doing_single_step, -1);
17869+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17870
17871 /* set the trace bit if we're stepping */
17872 if (remcomInBuffer[0] == 's') {
17873 linux_regs->flags |= X86_EFLAGS_TF;
17874- atomic_set(&kgdb_cpu_doing_single_step,
17875+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17876 raw_smp_processor_id());
17877 }
17878
17879@@ -546,7 +546,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17880
17881 switch (cmd) {
17882 case DIE_DEBUG:
17883- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17884+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17885 if (user_mode(regs))
17886 return single_step_cont(regs, args);
17887 break;
17888diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
17889index c5e410e..da6aaf9 100644
17890--- a/arch/x86/kernel/kprobes-opt.c
17891+++ b/arch/x86/kernel/kprobes-opt.c
17892@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17893 * Verify if the address gap is in 2GB range, because this uses
17894 * a relative jump.
17895 */
17896- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17897+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17898 if (abs(rel) > 0x7fffffff)
17899 return -ERANGE;
17900
17901@@ -359,11 +359,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17902 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17903
17904 /* Set probe function call */
17905- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17906+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17907
17908 /* Set returning jmp instruction at the tail of out-of-line buffer */
17909 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17910- (u8 *)op->kp.addr + op->optinsn.size);
17911+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17912
17913 flush_icache_range((unsigned long) buf,
17914 (unsigned long) buf + TMPL_END_IDX +
17915@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17916 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17917
17918 /* Backup instructions which will be replaced by jump address */
17919- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17920+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17921 RELATIVE_ADDR_SIZE);
17922
17923 insn_buf[0] = RELATIVEJUMP_OPCODE;
17924diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17925index e2f751e..dffa2a0 100644
17926--- a/arch/x86/kernel/kprobes.c
17927+++ b/arch/x86/kernel/kprobes.c
17928@@ -120,8 +120,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17929 } __attribute__((packed)) *insn;
17930
17931 insn = (struct __arch_relative_insn *)from;
17932+
17933+ pax_open_kernel();
17934 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17935 insn->op = op;
17936+ pax_close_kernel();
17937 }
17938
17939 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17940@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
17941 kprobe_opcode_t opcode;
17942 kprobe_opcode_t *orig_opcodes = opcodes;
17943
17944- if (search_exception_tables((unsigned long)opcodes))
17945+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17946 return 0; /* Page fault may occur on this address. */
17947
17948 retry:
17949@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17950 /* Another subsystem puts a breakpoint, failed to recover */
17951 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
17952 return 0;
17953+ pax_open_kernel();
17954 memcpy(dest, insn.kaddr, insn.length);
17955+ pax_close_kernel();
17956
17957 #ifdef CONFIG_X86_64
17958 if (insn_rip_relative(&insn)) {
17959@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17960 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
17961 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17962 disp = (u8 *) dest + insn_offset_displacement(&insn);
17963+ pax_open_kernel();
17964 *(s32 *) disp = (s32) newdisp;
17965+ pax_close_kernel();
17966 }
17967 #endif
17968 return insn.length;
17969@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17970 * nor set current_kprobe, because it doesn't use single
17971 * stepping.
17972 */
17973- regs->ip = (unsigned long)p->ainsn.insn;
17974+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17975 preempt_enable_no_resched();
17976 return;
17977 }
17978@@ -504,7 +511,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17979 if (p->opcode == BREAKPOINT_INSTRUCTION)
17980 regs->ip = (unsigned long)p->addr;
17981 else
17982- regs->ip = (unsigned long)p->ainsn.insn;
17983+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17984 }
17985
17986 /*
17987@@ -583,7 +590,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17988 setup_singlestep(p, regs, kcb, 0);
17989 return 1;
17990 }
17991- } else if (*addr != BREAKPOINT_INSTRUCTION) {
17992+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17993 /*
17994 * The breakpoint instruction was removed right
17995 * after we hit it. Another cpu has removed
17996@@ -628,6 +635,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17997 " movq %rax, 152(%rsp)\n"
17998 RESTORE_REGS_STRING
17999 " popfq\n"
18000+#ifdef KERNEXEC_PLUGIN
18001+ " btsq $63,(%rsp)\n"
18002+#endif
18003 #else
18004 " pushf\n"
18005 SAVE_REGS_STRING
18006@@ -765,7 +775,7 @@ static void __kprobes
18007 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18008 {
18009 unsigned long *tos = stack_addr(regs);
18010- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18011+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18012 unsigned long orig_ip = (unsigned long)p->addr;
18013 kprobe_opcode_t *insn = p->ainsn.insn;
18014
18015@@ -947,7 +957,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
18016 struct die_args *args = data;
18017 int ret = NOTIFY_DONE;
18018
18019- if (args->regs && user_mode_vm(args->regs))
18020+ if (args->regs && user_mode(args->regs))
18021 return ret;
18022
18023 switch (val) {
18024diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18025index ebc9873..1b9724b 100644
18026--- a/arch/x86/kernel/ldt.c
18027+++ b/arch/x86/kernel/ldt.c
18028@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18029 if (reload) {
18030 #ifdef CONFIG_SMP
18031 preempt_disable();
18032- load_LDT(pc);
18033+ load_LDT_nolock(pc);
18034 if (!cpumask_equal(mm_cpumask(current->mm),
18035 cpumask_of(smp_processor_id())))
18036 smp_call_function(flush_ldt, current->mm, 1);
18037 preempt_enable();
18038 #else
18039- load_LDT(pc);
18040+ load_LDT_nolock(pc);
18041 #endif
18042 }
18043 if (oldsize) {
18044@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18045 return err;
18046
18047 for (i = 0; i < old->size; i++)
18048- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18049+ write_ldt_entry(new->ldt, i, old->ldt + i);
18050 return 0;
18051 }
18052
18053@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18054 retval = copy_ldt(&mm->context, &old_mm->context);
18055 mutex_unlock(&old_mm->context.lock);
18056 }
18057+
18058+ if (tsk == current) {
18059+ mm->context.vdso = 0;
18060+
18061+#ifdef CONFIG_X86_32
18062+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18063+ mm->context.user_cs_base = 0UL;
18064+ mm->context.user_cs_limit = ~0UL;
18065+
18066+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18067+ cpus_clear(mm->context.cpu_user_cs_mask);
18068+#endif
18069+
18070+#endif
18071+#endif
18072+
18073+ }
18074+
18075 return retval;
18076 }
18077
18078@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18079 }
18080 }
18081
18082+#ifdef CONFIG_PAX_SEGMEXEC
18083+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18084+ error = -EINVAL;
18085+ goto out_unlock;
18086+ }
18087+#endif
18088+
18089 fill_ldt(&ldt, &ldt_info);
18090 if (oldmode)
18091 ldt.avl = 0;
18092diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18093index 5b19e4d..6476a76 100644
18094--- a/arch/x86/kernel/machine_kexec_32.c
18095+++ b/arch/x86/kernel/machine_kexec_32.c
18096@@ -26,7 +26,7 @@
18097 #include <asm/cacheflush.h>
18098 #include <asm/debugreg.h>
18099
18100-static void set_idt(void *newidt, __u16 limit)
18101+static void set_idt(struct desc_struct *newidt, __u16 limit)
18102 {
18103 struct desc_ptr curidt;
18104
18105@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18106 }
18107
18108
18109-static void set_gdt(void *newgdt, __u16 limit)
18110+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18111 {
18112 struct desc_ptr curgdt;
18113
18114@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
18115 }
18116
18117 control_page = page_address(image->control_code_page);
18118- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18119+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18120
18121 relocate_kernel_ptr = control_page;
18122 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18123diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18124index 0327e2b..e43737b 100644
18125--- a/arch/x86/kernel/microcode_intel.c
18126+++ b/arch/x86/kernel/microcode_intel.c
18127@@ -430,13 +430,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18128
18129 static int get_ucode_user(void *to, const void *from, size_t n)
18130 {
18131- return copy_from_user(to, from, n);
18132+ return copy_from_user(to, (const void __force_user *)from, n);
18133 }
18134
18135 static enum ucode_state
18136 request_microcode_user(int cpu, const void __user *buf, size_t size)
18137 {
18138- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18139+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18140 }
18141
18142 static void microcode_fini_cpu(int cpu)
18143diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18144index f21fd94..61565cd 100644
18145--- a/arch/x86/kernel/module.c
18146+++ b/arch/x86/kernel/module.c
18147@@ -35,15 +35,60 @@
18148 #define DEBUGP(fmt...)
18149 #endif
18150
18151-void *module_alloc(unsigned long size)
18152+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
18153 {
18154- if (PAGE_ALIGN(size) > MODULES_LEN)
18155+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
18156 return NULL;
18157 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18158- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18159+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18160 -1, __builtin_return_address(0));
18161 }
18162
18163+void *module_alloc(unsigned long size)
18164+{
18165+
18166+#ifdef CONFIG_PAX_KERNEXEC
18167+ return __module_alloc(size, PAGE_KERNEL);
18168+#else
18169+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18170+#endif
18171+
18172+}
18173+
18174+#ifdef CONFIG_PAX_KERNEXEC
18175+#ifdef CONFIG_X86_32
18176+void *module_alloc_exec(unsigned long size)
18177+{
18178+ struct vm_struct *area;
18179+
18180+ if (size == 0)
18181+ return NULL;
18182+
18183+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18184+ return area ? area->addr : NULL;
18185+}
18186+EXPORT_SYMBOL(module_alloc_exec);
18187+
18188+void module_free_exec(struct module *mod, void *module_region)
18189+{
18190+ vunmap(module_region);
18191+}
18192+EXPORT_SYMBOL(module_free_exec);
18193+#else
18194+void module_free_exec(struct module *mod, void *module_region)
18195+{
18196+ module_free(mod, module_region);
18197+}
18198+EXPORT_SYMBOL(module_free_exec);
18199+
18200+void *module_alloc_exec(unsigned long size)
18201+{
18202+ return __module_alloc(size, PAGE_KERNEL_RX);
18203+}
18204+EXPORT_SYMBOL(module_alloc_exec);
18205+#endif
18206+#endif
18207+
18208 #ifdef CONFIG_X86_32
18209 int apply_relocate(Elf32_Shdr *sechdrs,
18210 const char *strtab,
18211@@ -54,14 +99,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18212 unsigned int i;
18213 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18214 Elf32_Sym *sym;
18215- uint32_t *location;
18216+ uint32_t *plocation, location;
18217
18218 DEBUGP("Applying relocate section %u to %u\n", relsec,
18219 sechdrs[relsec].sh_info);
18220 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18221 /* This is where to make the change */
18222- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18223- + rel[i].r_offset;
18224+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18225+ location = (uint32_t)plocation;
18226+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18227+ plocation = ktla_ktva((void *)plocation);
18228 /* This is the symbol it is referring to. Note that all
18229 undefined symbols have been resolved. */
18230 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18231@@ -70,11 +117,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18232 switch (ELF32_R_TYPE(rel[i].r_info)) {
18233 case R_386_32:
18234 /* We add the value into the location given */
18235- *location += sym->st_value;
18236+ pax_open_kernel();
18237+ *plocation += sym->st_value;
18238+ pax_close_kernel();
18239 break;
18240 case R_386_PC32:
18241 /* Add the value, subtract its postition */
18242- *location += sym->st_value - (uint32_t)location;
18243+ pax_open_kernel();
18244+ *plocation += sym->st_value - location;
18245+ pax_close_kernel();
18246 break;
18247 default:
18248 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18249@@ -119,21 +170,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18250 case R_X86_64_NONE:
18251 break;
18252 case R_X86_64_64:
18253+ pax_open_kernel();
18254 *(u64 *)loc = val;
18255+ pax_close_kernel();
18256 break;
18257 case R_X86_64_32:
18258+ pax_open_kernel();
18259 *(u32 *)loc = val;
18260+ pax_close_kernel();
18261 if (val != *(u32 *)loc)
18262 goto overflow;
18263 break;
18264 case R_X86_64_32S:
18265+ pax_open_kernel();
18266 *(s32 *)loc = val;
18267+ pax_close_kernel();
18268 if ((s64)val != *(s32 *)loc)
18269 goto overflow;
18270 break;
18271 case R_X86_64_PC32:
18272 val -= (u64)loc;
18273+ pax_open_kernel();
18274 *(u32 *)loc = val;
18275+ pax_close_kernel();
18276+
18277 #if 0
18278 if ((s64)val != *(s32 *)loc)
18279 goto overflow;
18280diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18281index a0b2f84..875ab81 100644
18282--- a/arch/x86/kernel/nmi.c
18283+++ b/arch/x86/kernel/nmi.c
18284@@ -460,6 +460,17 @@ static inline void nmi_nesting_postprocess(void)
18285 dotraplinkage notrace __kprobes void
18286 do_nmi(struct pt_regs *regs, long error_code)
18287 {
18288+
18289+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18290+ if (!user_mode(regs)) {
18291+ unsigned long cs = regs->cs & 0xFFFF;
18292+ unsigned long ip = ktva_ktla(regs->ip);
18293+
18294+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18295+ regs->ip = ip;
18296+ }
18297+#endif
18298+
18299 nmi_nesting_preprocess(regs);
18300
18301 nmi_enter();
18302diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18303index 676b8c7..870ba04 100644
18304--- a/arch/x86/kernel/paravirt-spinlocks.c
18305+++ b/arch/x86/kernel/paravirt-spinlocks.c
18306@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18307 arch_spin_lock(lock);
18308 }
18309
18310-struct pv_lock_ops pv_lock_ops = {
18311+struct pv_lock_ops pv_lock_ops __read_only = {
18312 #ifdef CONFIG_SMP
18313 .spin_is_locked = __ticket_spin_is_locked,
18314 .spin_is_contended = __ticket_spin_is_contended,
18315diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18316index 9ce8859..b49bf51 100644
18317--- a/arch/x86/kernel/paravirt.c
18318+++ b/arch/x86/kernel/paravirt.c
18319@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
18320 {
18321 return x;
18322 }
18323+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18324+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18325+#endif
18326
18327 void __init default_banner(void)
18328 {
18329@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18330 if (opfunc == NULL)
18331 /* If there's no function, patch it with a ud2a (BUG) */
18332 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18333- else if (opfunc == _paravirt_nop)
18334+ else if (opfunc == (void *)_paravirt_nop)
18335 /* If the operation is a nop, then nop the callsite */
18336 ret = paravirt_patch_nop();
18337
18338 /* identity functions just return their single argument */
18339- else if (opfunc == _paravirt_ident_32)
18340+ else if (opfunc == (void *)_paravirt_ident_32)
18341 ret = paravirt_patch_ident_32(insnbuf, len);
18342- else if (opfunc == _paravirt_ident_64)
18343+ else if (opfunc == (void *)_paravirt_ident_64)
18344 ret = paravirt_patch_ident_64(insnbuf, len);
18345+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18346+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18347+ ret = paravirt_patch_ident_64(insnbuf, len);
18348+#endif
18349
18350 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18351 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18352@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18353 if (insn_len > len || start == NULL)
18354 insn_len = len;
18355 else
18356- memcpy(insnbuf, start, insn_len);
18357+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18358
18359 return insn_len;
18360 }
18361@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
18362 preempt_enable();
18363 }
18364
18365-struct pv_info pv_info = {
18366+struct pv_info pv_info __read_only = {
18367 .name = "bare hardware",
18368 .paravirt_enabled = 0,
18369 .kernel_rpl = 0,
18370@@ -315,16 +322,16 @@ struct pv_info pv_info = {
18371 #endif
18372 };
18373
18374-struct pv_init_ops pv_init_ops = {
18375+struct pv_init_ops pv_init_ops __read_only = {
18376 .patch = native_patch,
18377 };
18378
18379-struct pv_time_ops pv_time_ops = {
18380+struct pv_time_ops pv_time_ops __read_only = {
18381 .sched_clock = native_sched_clock,
18382 .steal_clock = native_steal_clock,
18383 };
18384
18385-struct pv_irq_ops pv_irq_ops = {
18386+struct pv_irq_ops pv_irq_ops __read_only = {
18387 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18388 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18389 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18390@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
18391 #endif
18392 };
18393
18394-struct pv_cpu_ops pv_cpu_ops = {
18395+struct pv_cpu_ops pv_cpu_ops __read_only = {
18396 .cpuid = native_cpuid,
18397 .get_debugreg = native_get_debugreg,
18398 .set_debugreg = native_set_debugreg,
18399@@ -397,21 +404,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18400 .end_context_switch = paravirt_nop,
18401 };
18402
18403-struct pv_apic_ops pv_apic_ops = {
18404+struct pv_apic_ops pv_apic_ops __read_only = {
18405 #ifdef CONFIG_X86_LOCAL_APIC
18406 .startup_ipi_hook = paravirt_nop,
18407 #endif
18408 };
18409
18410-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18411+#ifdef CONFIG_X86_32
18412+#ifdef CONFIG_X86_PAE
18413+/* 64-bit pagetable entries */
18414+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18415+#else
18416 /* 32-bit pagetable entries */
18417 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18418+#endif
18419 #else
18420 /* 64-bit pagetable entries */
18421 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18422 #endif
18423
18424-struct pv_mmu_ops pv_mmu_ops = {
18425+struct pv_mmu_ops pv_mmu_ops __read_only = {
18426
18427 .read_cr2 = native_read_cr2,
18428 .write_cr2 = native_write_cr2,
18429@@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18430 .make_pud = PTE_IDENT,
18431
18432 .set_pgd = native_set_pgd,
18433+ .set_pgd_batched = native_set_pgd_batched,
18434 #endif
18435 #endif /* PAGETABLE_LEVELS >= 3 */
18436
18437@@ -480,6 +493,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18438 },
18439
18440 .set_fixmap = native_set_fixmap,
18441+
18442+#ifdef CONFIG_PAX_KERNEXEC
18443+ .pax_open_kernel = native_pax_open_kernel,
18444+ .pax_close_kernel = native_pax_close_kernel,
18445+#endif
18446+
18447 };
18448
18449 EXPORT_SYMBOL_GPL(pv_time_ops);
18450diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
18451index 35ccf75..7a15747 100644
18452--- a/arch/x86/kernel/pci-iommu_table.c
18453+++ b/arch/x86/kernel/pci-iommu_table.c
18454@@ -2,7 +2,7 @@
18455 #include <asm/iommu_table.h>
18456 #include <linux/string.h>
18457 #include <linux/kallsyms.h>
18458-
18459+#include <linux/sched.h>
18460
18461 #define DEBUG 1
18462
18463diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18464index 735279e..5008677 100644
18465--- a/arch/x86/kernel/process.c
18466+++ b/arch/x86/kernel/process.c
18467@@ -34,7 +34,8 @@
18468 * section. Since TSS's are completely CPU-local, we want them
18469 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
18470 */
18471-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
18472+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
18473+EXPORT_SYMBOL(init_tss);
18474
18475 #ifdef CONFIG_X86_64
18476 static DEFINE_PER_CPU(unsigned char, is_idle);
18477@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
18478 task_xstate_cachep =
18479 kmem_cache_create("task_xstate", xstate_size,
18480 __alignof__(union thread_xstate),
18481- SLAB_PANIC | SLAB_NOTRACK, NULL);
18482+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18483 }
18484
18485 static inline void drop_fpu(struct task_struct *tsk)
18486@@ -115,7 +116,7 @@ void exit_thread(void)
18487 unsigned long *bp = t->io_bitmap_ptr;
18488
18489 if (bp) {
18490- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18491+ struct tss_struct *tss = init_tss + get_cpu();
18492
18493 t->io_bitmap_ptr = NULL;
18494 clear_thread_flag(TIF_IO_BITMAP);
18495@@ -147,7 +148,7 @@ void show_regs_common(void)
18496
18497 printk(KERN_CONT "\n");
18498 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18499- current->pid, current->comm, print_tainted(),
18500+ task_pid_nr(current), current->comm, print_tainted(),
18501 init_utsname()->release,
18502 (int)strcspn(init_utsname()->version, " "),
18503 init_utsname()->version);
18504@@ -161,6 +162,9 @@ void flush_thread(void)
18505 {
18506 struct task_struct *tsk = current;
18507
18508+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18509+ loadsegment(gs, 0);
18510+#endif
18511 flush_ptrace_hw_breakpoint(tsk);
18512 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18513 drop_fpu(tsk);
18514@@ -318,10 +322,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18515 regs.di = (unsigned long) arg;
18516
18517 #ifdef CONFIG_X86_32
18518- regs.ds = __USER_DS;
18519- regs.es = __USER_DS;
18520+ regs.ds = __KERNEL_DS;
18521+ regs.es = __KERNEL_DS;
18522 regs.fs = __KERNEL_PERCPU;
18523- regs.gs = __KERNEL_STACK_CANARY;
18524+ savesegment(gs, regs.gs);
18525 #else
18526 regs.ss = __KERNEL_DS;
18527 #endif
18528@@ -407,7 +411,7 @@ static void __exit_idle(void)
18529 void exit_idle(void)
18530 {
18531 /* idle loop has pid 0 */
18532- if (current->pid)
18533+ if (task_pid_nr(current))
18534 return;
18535 __exit_idle();
18536 }
18537@@ -516,7 +520,7 @@ bool set_pm_idle_to_default(void)
18538
18539 return ret;
18540 }
18541-void stop_this_cpu(void *dummy)
18542+__noreturn void stop_this_cpu(void *dummy)
18543 {
18544 local_irq_disable();
18545 /*
18546@@ -746,16 +750,37 @@ static int __init idle_setup(char *str)
18547 }
18548 early_param("idle", idle_setup);
18549
18550-unsigned long arch_align_stack(unsigned long sp)
18551+#ifdef CONFIG_PAX_RANDKSTACK
18552+void pax_randomize_kstack(struct pt_regs *regs)
18553 {
18554- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18555- sp -= get_random_int() % 8192;
18556- return sp & ~0xf;
18557-}
18558+ struct thread_struct *thread = &current->thread;
18559+ unsigned long time;
18560
18561-unsigned long arch_randomize_brk(struct mm_struct *mm)
18562-{
18563- unsigned long range_end = mm->brk + 0x02000000;
18564- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18565-}
18566+ if (!randomize_va_space)
18567+ return;
18568+
18569+ if (v8086_mode(regs))
18570+ return;
18571
18572+ rdtscl(time);
18573+
18574+ /* P4 seems to return a 0 LSB, ignore it */
18575+#ifdef CONFIG_MPENTIUM4
18576+ time &= 0x3EUL;
18577+ time <<= 2;
18578+#elif defined(CONFIG_X86_64)
18579+ time &= 0xFUL;
18580+ time <<= 4;
18581+#else
18582+ time &= 0x1FUL;
18583+ time <<= 3;
18584+#endif
18585+
18586+ thread->sp0 ^= time;
18587+ load_sp0(init_tss + smp_processor_id(), thread);
18588+
18589+#ifdef CONFIG_X86_64
18590+ this_cpu_write(kernel_stack, thread->sp0);
18591+#endif
18592+}
18593+#endif
18594diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18595index 516fa18..80bd9e6 100644
18596--- a/arch/x86/kernel/process_32.c
18597+++ b/arch/x86/kernel/process_32.c
18598@@ -64,6 +64,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18599 unsigned long thread_saved_pc(struct task_struct *tsk)
18600 {
18601 return ((unsigned long *)tsk->thread.sp)[3];
18602+//XXX return tsk->thread.eip;
18603 }
18604
18605 void __show_regs(struct pt_regs *regs, int all)
18606@@ -73,15 +74,14 @@ void __show_regs(struct pt_regs *regs, int all)
18607 unsigned long sp;
18608 unsigned short ss, gs;
18609
18610- if (user_mode_vm(regs)) {
18611+ if (user_mode(regs)) {
18612 sp = regs->sp;
18613 ss = regs->ss & 0xffff;
18614- gs = get_user_gs(regs);
18615 } else {
18616 sp = kernel_stack_pointer(regs);
18617 savesegment(ss, ss);
18618- savesegment(gs, gs);
18619 }
18620+ gs = get_user_gs(regs);
18621
18622 show_regs_common();
18623
18624@@ -134,13 +134,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18625 struct task_struct *tsk;
18626 int err;
18627
18628- childregs = task_pt_regs(p);
18629+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18630 *childregs = *regs;
18631 childregs->ax = 0;
18632 childregs->sp = sp;
18633
18634 p->thread.sp = (unsigned long) childregs;
18635 p->thread.sp0 = (unsigned long) (childregs+1);
18636+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18637
18638 p->thread.ip = (unsigned long) ret_from_fork;
18639
18640@@ -231,7 +232,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18641 struct thread_struct *prev = &prev_p->thread,
18642 *next = &next_p->thread;
18643 int cpu = smp_processor_id();
18644- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18645+ struct tss_struct *tss = init_tss + cpu;
18646 fpu_switch_t fpu;
18647
18648 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18649@@ -255,6 +256,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18650 */
18651 lazy_save_gs(prev->gs);
18652
18653+#ifdef CONFIG_PAX_MEMORY_UDEREF
18654+ __set_fs(task_thread_info(next_p)->addr_limit);
18655+#endif
18656+
18657 /*
18658 * Load the per-thread Thread-Local Storage descriptor.
18659 */
18660@@ -285,6 +290,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18661 */
18662 arch_end_context_switch(next_p);
18663
18664+ this_cpu_write(current_task, next_p);
18665+ this_cpu_write(current_tinfo, &next_p->tinfo);
18666+
18667 /*
18668 * Restore %gs if needed (which is common)
18669 */
18670@@ -293,8 +301,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18671
18672 switch_fpu_finish(next_p, fpu);
18673
18674- this_cpu_write(current_task, next_p);
18675-
18676 return prev_p;
18677 }
18678
18679@@ -324,4 +330,3 @@ unsigned long get_wchan(struct task_struct *p)
18680 } while (count++ < 16);
18681 return 0;
18682 }
18683-
18684diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18685index 61cdf7f..797f06a 100644
18686--- a/arch/x86/kernel/process_64.c
18687+++ b/arch/x86/kernel/process_64.c
18688@@ -153,8 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18689 struct pt_regs *childregs;
18690 struct task_struct *me = current;
18691
18692- childregs = ((struct pt_regs *)
18693- (THREAD_SIZE + task_stack_page(p))) - 1;
18694+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18695 *childregs = *regs;
18696
18697 childregs->ax = 0;
18698@@ -166,6 +165,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18699 p->thread.sp = (unsigned long) childregs;
18700 p->thread.sp0 = (unsigned long) (childregs+1);
18701 p->thread.usersp = me->thread.usersp;
18702+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18703
18704 set_tsk_thread_flag(p, TIF_FORK);
18705
18706@@ -271,7 +271,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18707 struct thread_struct *prev = &prev_p->thread;
18708 struct thread_struct *next = &next_p->thread;
18709 int cpu = smp_processor_id();
18710- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18711+ struct tss_struct *tss = init_tss + cpu;
18712 unsigned fsindex, gsindex;
18713 fpu_switch_t fpu;
18714
18715@@ -353,10 +353,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18716 prev->usersp = this_cpu_read(old_rsp);
18717 this_cpu_write(old_rsp, next->usersp);
18718 this_cpu_write(current_task, next_p);
18719+ this_cpu_write(current_tinfo, &next_p->tinfo);
18720
18721- this_cpu_write(kernel_stack,
18722- (unsigned long)task_stack_page(next_p) +
18723- THREAD_SIZE - KERNEL_STACK_OFFSET);
18724+ this_cpu_write(kernel_stack, next->sp0);
18725
18726 /*
18727 * Now maybe reload the debug registers and handle I/O bitmaps
18728@@ -425,12 +424,11 @@ unsigned long get_wchan(struct task_struct *p)
18729 if (!p || p == current || p->state == TASK_RUNNING)
18730 return 0;
18731 stack = (unsigned long)task_stack_page(p);
18732- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18733+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18734 return 0;
18735 fp = *(u64 *)(p->thread.sp);
18736 do {
18737- if (fp < (unsigned long)stack ||
18738- fp >= (unsigned long)stack+THREAD_SIZE)
18739+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18740 return 0;
18741 ip = *(u64 *)(fp+8);
18742 if (!in_sched_functions(ip))
18743diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18744index c4c6a5c..905f440 100644
18745--- a/arch/x86/kernel/ptrace.c
18746+++ b/arch/x86/kernel/ptrace.c
18747@@ -824,7 +824,7 @@ long arch_ptrace(struct task_struct *child, long request,
18748 unsigned long addr, unsigned long data)
18749 {
18750 int ret;
18751- unsigned long __user *datap = (unsigned long __user *)data;
18752+ unsigned long __user *datap = (__force unsigned long __user *)data;
18753
18754 switch (request) {
18755 /* read the word at location addr in the USER area. */
18756@@ -909,14 +909,14 @@ long arch_ptrace(struct task_struct *child, long request,
18757 if ((int) addr < 0)
18758 return -EIO;
18759 ret = do_get_thread_area(child, addr,
18760- (struct user_desc __user *)data);
18761+ (__force struct user_desc __user *) data);
18762 break;
18763
18764 case PTRACE_SET_THREAD_AREA:
18765 if ((int) addr < 0)
18766 return -EIO;
18767 ret = do_set_thread_area(child, addr,
18768- (struct user_desc __user *)data, 0);
18769+ (__force struct user_desc __user *) data, 0);
18770 break;
18771 #endif
18772
18773@@ -1426,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18774 memset(info, 0, sizeof(*info));
18775 info->si_signo = SIGTRAP;
18776 info->si_code = si_code;
18777- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18778+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18779 }
18780
18781 void user_single_step_siginfo(struct task_struct *tsk,
18782@@ -1455,6 +1455,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18783 # define IS_IA32 0
18784 #endif
18785
18786+#ifdef CONFIG_GRKERNSEC_SETXID
18787+extern void gr_delayed_cred_worker(void);
18788+#endif
18789+
18790 /*
18791 * We must return the syscall number to actually look up in the table.
18792 * This can be -1L to skip running any syscall at all.
18793@@ -1463,6 +1467,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18794 {
18795 long ret = 0;
18796
18797+#ifdef CONFIG_GRKERNSEC_SETXID
18798+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18799+ gr_delayed_cred_worker();
18800+#endif
18801+
18802 /*
18803 * If we stepped into a sysenter/syscall insn, it trapped in
18804 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
18805@@ -1511,6 +1520,11 @@ void syscall_trace_leave(struct pt_regs *regs)
18806 {
18807 bool step;
18808
18809+#ifdef CONFIG_GRKERNSEC_SETXID
18810+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18811+ gr_delayed_cred_worker();
18812+#endif
18813+
18814 audit_syscall_exit(regs);
18815
18816 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
18817diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18818index 42eb330..139955c 100644
18819--- a/arch/x86/kernel/pvclock.c
18820+++ b/arch/x86/kernel/pvclock.c
18821@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18822 return pv_tsc_khz;
18823 }
18824
18825-static atomic64_t last_value = ATOMIC64_INIT(0);
18826+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18827
18828 void pvclock_resume(void)
18829 {
18830- atomic64_set(&last_value, 0);
18831+ atomic64_set_unchecked(&last_value, 0);
18832 }
18833
18834 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18835@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18836 * updating at the same time, and one of them could be slightly behind,
18837 * making the assumption that last_value always go forward fail to hold.
18838 */
18839- last = atomic64_read(&last_value);
18840+ last = atomic64_read_unchecked(&last_value);
18841 do {
18842 if (ret < last)
18843 return last;
18844- last = atomic64_cmpxchg(&last_value, last, ret);
18845+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18846 } while (unlikely(last != ret));
18847
18848 return ret;
18849diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18850index 5de92f1..776788d 100644
18851--- a/arch/x86/kernel/reboot.c
18852+++ b/arch/x86/kernel/reboot.c
18853@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
18854 EXPORT_SYMBOL(pm_power_off);
18855
18856 static const struct desc_ptr no_idt = {};
18857-static int reboot_mode;
18858+static unsigned short reboot_mode;
18859 enum reboot_type reboot_type = BOOT_ACPI;
18860 int reboot_force;
18861
18862@@ -157,11 +157,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
18863 return 0;
18864 }
18865
18866-void machine_real_restart(unsigned int type)
18867+__noreturn void machine_real_restart(unsigned int type)
18868 {
18869 void (*restart_lowmem)(unsigned int) = (void (*)(unsigned int))
18870 real_mode_header->machine_real_restart_asm;
18871
18872+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18873+ struct desc_struct *gdt;
18874+#endif
18875+
18876 local_irq_disable();
18877
18878 /*
18879@@ -189,10 +193,38 @@ void machine_real_restart(unsigned int type)
18880 * boot)". This seems like a fairly standard thing that gets set by
18881 * REBOOT.COM programs, and the previous reset routine did this
18882 * too. */
18883- *((unsigned short *)0x472) = reboot_mode;
18884+ *(unsigned short *)(__va(0x472)) = reboot_mode;
18885
18886 /* Jump to the identity-mapped low memory code */
18887+
18888+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18889+ gdt = get_cpu_gdt_table(smp_processor_id());
18890+ pax_open_kernel();
18891+#ifdef CONFIG_PAX_MEMORY_UDEREF
18892+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18893+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18894+ loadsegment(ds, __KERNEL_DS);
18895+ loadsegment(es, __KERNEL_DS);
18896+ loadsegment(ss, __KERNEL_DS);
18897+#endif
18898+#ifdef CONFIG_PAX_KERNEXEC
18899+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18900+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18901+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18902+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18903+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18904+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18905+#endif
18906+ pax_close_kernel();
18907+#endif
18908+
18909+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18910+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18911+ unreachable();
18912+#else
18913 restart_lowmem(type);
18914+#endif
18915+
18916 }
18917 #ifdef CONFIG_APM_MODULE
18918 EXPORT_SYMBOL(machine_real_restart);
18919@@ -543,7 +575,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18920 * try to force a triple fault and then cycle between hitting the keyboard
18921 * controller and doing that
18922 */
18923-static void native_machine_emergency_restart(void)
18924+static void __noreturn native_machine_emergency_restart(void)
18925 {
18926 int i;
18927 int attempt = 0;
18928@@ -670,13 +702,13 @@ void native_machine_shutdown(void)
18929 #endif
18930 }
18931
18932-static void __machine_emergency_restart(int emergency)
18933+static __noreturn void __machine_emergency_restart(int emergency)
18934 {
18935 reboot_emergency = emergency;
18936 machine_ops.emergency_restart();
18937 }
18938
18939-static void native_machine_restart(char *__unused)
18940+static void __noreturn native_machine_restart(char *__unused)
18941 {
18942 printk("machine restart\n");
18943
18944@@ -685,7 +717,7 @@ static void native_machine_restart(char *__unused)
18945 __machine_emergency_restart(0);
18946 }
18947
18948-static void native_machine_halt(void)
18949+static void __noreturn native_machine_halt(void)
18950 {
18951 /* Stop other cpus and apics */
18952 machine_shutdown();
18953@@ -695,7 +727,7 @@ static void native_machine_halt(void)
18954 stop_this_cpu(NULL);
18955 }
18956
18957-static void native_machine_power_off(void)
18958+static void __noreturn native_machine_power_off(void)
18959 {
18960 if (pm_power_off) {
18961 if (!reboot_force)
18962@@ -704,6 +736,7 @@ static void native_machine_power_off(void)
18963 }
18964 /* A fallback in case there is no PM info available */
18965 tboot_shutdown(TB_SHUTDOWN_HALT);
18966+ unreachable();
18967 }
18968
18969 struct machine_ops machine_ops = {
18970diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18971index 7a6f3b3..bed145d7 100644
18972--- a/arch/x86/kernel/relocate_kernel_64.S
18973+++ b/arch/x86/kernel/relocate_kernel_64.S
18974@@ -11,6 +11,7 @@
18975 #include <asm/kexec.h>
18976 #include <asm/processor-flags.h>
18977 #include <asm/pgtable_types.h>
18978+#include <asm/alternative-asm.h>
18979
18980 /*
18981 * Must be relocatable PIC code callable as a C function
18982@@ -160,13 +161,14 @@ identity_mapped:
18983 xorq %rbp, %rbp
18984 xorq %r8, %r8
18985 xorq %r9, %r9
18986- xorq %r10, %r9
18987+ xorq %r10, %r10
18988 xorq %r11, %r11
18989 xorq %r12, %r12
18990 xorq %r13, %r13
18991 xorq %r14, %r14
18992 xorq %r15, %r15
18993
18994+ pax_force_retaddr 0, 1
18995 ret
18996
18997 1:
18998diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18999index 16be6dc..4686132 100644
19000--- a/arch/x86/kernel/setup.c
19001+++ b/arch/x86/kernel/setup.c
19002@@ -440,7 +440,7 @@ static void __init parse_setup_data(void)
19003
19004 switch (data->type) {
19005 case SETUP_E820_EXT:
19006- parse_e820_ext(data);
19007+ parse_e820_ext((struct setup_data __force_kernel *)data);
19008 break;
19009 case SETUP_DTB:
19010 add_dtb(pa_data);
19011@@ -632,7 +632,7 @@ static void __init trim_bios_range(void)
19012 * area (640->1Mb) as ram even though it is not.
19013 * take them out.
19014 */
19015- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
19016+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
19017 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
19018 }
19019
19020@@ -755,14 +755,14 @@ void __init setup_arch(char **cmdline_p)
19021
19022 if (!boot_params.hdr.root_flags)
19023 root_mountflags &= ~MS_RDONLY;
19024- init_mm.start_code = (unsigned long) _text;
19025- init_mm.end_code = (unsigned long) _etext;
19026+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19027+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19028 init_mm.end_data = (unsigned long) _edata;
19029 init_mm.brk = _brk_end;
19030
19031- code_resource.start = virt_to_phys(_text);
19032- code_resource.end = virt_to_phys(_etext)-1;
19033- data_resource.start = virt_to_phys(_etext);
19034+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19035+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19036+ data_resource.start = virt_to_phys(_sdata);
19037 data_resource.end = virt_to_phys(_edata)-1;
19038 bss_resource.start = virt_to_phys(&__bss_start);
19039 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19040diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19041index 5a98aa2..2f9288d 100644
19042--- a/arch/x86/kernel/setup_percpu.c
19043+++ b/arch/x86/kernel/setup_percpu.c
19044@@ -21,19 +21,17 @@
19045 #include <asm/cpu.h>
19046 #include <asm/stackprotector.h>
19047
19048-DEFINE_PER_CPU(int, cpu_number);
19049+#ifdef CONFIG_SMP
19050+DEFINE_PER_CPU(unsigned int, cpu_number);
19051 EXPORT_PER_CPU_SYMBOL(cpu_number);
19052+#endif
19053
19054-#ifdef CONFIG_X86_64
19055 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19056-#else
19057-#define BOOT_PERCPU_OFFSET 0
19058-#endif
19059
19060 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19061 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19062
19063-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19064+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19065 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19066 };
19067 EXPORT_SYMBOL(__per_cpu_offset);
19068@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
19069 {
19070 #ifdef CONFIG_X86_32
19071 struct desc_struct gdt;
19072+ unsigned long base = per_cpu_offset(cpu);
19073
19074- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19075- 0x2 | DESCTYPE_S, 0x8);
19076- gdt.s = 1;
19077+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19078+ 0x83 | DESCTYPE_S, 0xC);
19079 write_gdt_entry(get_cpu_gdt_table(cpu),
19080 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19081 #endif
19082@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
19083 /* alrighty, percpu areas up and running */
19084 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19085 for_each_possible_cpu(cpu) {
19086+#ifdef CONFIG_CC_STACKPROTECTOR
19087+#ifdef CONFIG_X86_32
19088+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
19089+#endif
19090+#endif
19091 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19092 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19093 per_cpu(cpu_number, cpu) = cpu;
19094@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
19095 */
19096 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
19097 #endif
19098+#ifdef CONFIG_CC_STACKPROTECTOR
19099+#ifdef CONFIG_X86_32
19100+ if (!cpu)
19101+ per_cpu(stack_canary.canary, cpu) = canary;
19102+#endif
19103+#endif
19104 /*
19105 * Up to this point, the boot CPU has been using .init.data
19106 * area. Reload any changed state for the boot CPU.
19107diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19108index 21af737..fb45e22 100644
19109--- a/arch/x86/kernel/signal.c
19110+++ b/arch/x86/kernel/signal.c
19111@@ -191,7 +191,7 @@ static unsigned long align_sigframe(unsigned long sp)
19112 * Align the stack pointer according to the i386 ABI,
19113 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19114 */
19115- sp = ((sp + 4) & -16ul) - 4;
19116+ sp = ((sp - 12) & -16ul) - 4;
19117 #else /* !CONFIG_X86_32 */
19118 sp = round_down(sp, 16) - 8;
19119 #endif
19120@@ -242,11 +242,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19121 * Return an always-bogus address instead so we will die with SIGSEGV.
19122 */
19123 if (onsigstack && !likely(on_sig_stack(sp)))
19124- return (void __user *)-1L;
19125+ return (__force void __user *)-1L;
19126
19127 /* save i387 state */
19128 if (used_math() && save_i387_xstate(*fpstate) < 0)
19129- return (void __user *)-1L;
19130+ return (__force void __user *)-1L;
19131
19132 return (void __user *)sp;
19133 }
19134@@ -301,9 +301,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19135 }
19136
19137 if (current->mm->context.vdso)
19138- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19139+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19140 else
19141- restorer = &frame->retcode;
19142+ restorer = (void __user *)&frame->retcode;
19143 if (ka->sa.sa_flags & SA_RESTORER)
19144 restorer = ka->sa.sa_restorer;
19145
19146@@ -317,7 +317,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19147 * reasons and because gdb uses it as a signature to notice
19148 * signal handler stack frames.
19149 */
19150- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19151+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19152
19153 if (err)
19154 return -EFAULT;
19155@@ -371,7 +371,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19156 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19157
19158 /* Set up to return from userspace. */
19159- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19160+ if (current->mm->context.vdso)
19161+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19162+ else
19163+ restorer = (void __user *)&frame->retcode;
19164 if (ka->sa.sa_flags & SA_RESTORER)
19165 restorer = ka->sa.sa_restorer;
19166 put_user_ex(restorer, &frame->pretcode);
19167@@ -383,7 +386,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19168 * reasons and because gdb uses it as a signature to notice
19169 * signal handler stack frames.
19170 */
19171- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19172+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19173 } put_user_catch(err);
19174
19175 if (err)
19176diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19177index 7bd8a08..2659b5b 100644
19178--- a/arch/x86/kernel/smpboot.c
19179+++ b/arch/x86/kernel/smpboot.c
19180@@ -679,6 +679,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
19181 idle->thread.sp = (unsigned long) (((struct pt_regs *)
19182 (THREAD_SIZE + task_stack_page(idle))) - 1);
19183 per_cpu(current_task, cpu) = idle;
19184+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
19185
19186 #ifdef CONFIG_X86_32
19187 /* Stack for startup_32 can be just as for start_secondary onwards */
19188@@ -686,11 +687,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
19189 #else
19190 clear_tsk_thread_flag(idle, TIF_FORK);
19191 initial_gs = per_cpu_offset(cpu);
19192- per_cpu(kernel_stack, cpu) =
19193- (unsigned long)task_stack_page(idle) -
19194- KERNEL_STACK_OFFSET + THREAD_SIZE;
19195+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
19196 #endif
19197+
19198+ pax_open_kernel();
19199 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19200+ pax_close_kernel();
19201+
19202 initial_code = (unsigned long)start_secondary;
19203 stack_start = idle->thread.sp;
19204
19205@@ -826,6 +829,12 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
19206
19207 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19208
19209+#ifdef CONFIG_PAX_PER_CPU_PGD
19210+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19211+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19212+ KERNEL_PGD_PTRS);
19213+#endif
19214+
19215 err = do_boot_cpu(apicid, cpu, tidle);
19216 if (err) {
19217 pr_debug("do_boot_cpu failed %d\n", err);
19218diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19219index c346d11..d43b163 100644
19220--- a/arch/x86/kernel/step.c
19221+++ b/arch/x86/kernel/step.c
19222@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19223 struct desc_struct *desc;
19224 unsigned long base;
19225
19226- seg &= ~7UL;
19227+ seg >>= 3;
19228
19229 mutex_lock(&child->mm->context.lock);
19230- if (unlikely((seg >> 3) >= child->mm->context.size))
19231+ if (unlikely(seg >= child->mm->context.size))
19232 addr = -1L; /* bogus selector, access would fault */
19233 else {
19234 desc = child->mm->context.ldt + seg;
19235@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19236 addr += base;
19237 }
19238 mutex_unlock(&child->mm->context.lock);
19239- }
19240+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19241+ addr = ktla_ktva(addr);
19242
19243 return addr;
19244 }
19245@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19246 unsigned char opcode[15];
19247 unsigned long addr = convert_ip_to_linear(child, regs);
19248
19249+ if (addr == -EINVAL)
19250+ return 0;
19251+
19252 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19253 for (i = 0; i < copied; i++) {
19254 switch (opcode[i]) {
19255diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19256index 0b0cb5f..db6b9ed 100644
19257--- a/arch/x86/kernel/sys_i386_32.c
19258+++ b/arch/x86/kernel/sys_i386_32.c
19259@@ -24,17 +24,224 @@
19260
19261 #include <asm/syscalls.h>
19262
19263-/*
19264- * Do a system call from kernel instead of calling sys_execve so we
19265- * end up with proper pt_regs.
19266- */
19267-int kernel_execve(const char *filename,
19268- const char *const argv[],
19269- const char *const envp[])
19270+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19271 {
19272- long __res;
19273- asm volatile ("int $0x80"
19274- : "=a" (__res)
19275- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19276- return __res;
19277+ unsigned long pax_task_size = TASK_SIZE;
19278+
19279+#ifdef CONFIG_PAX_SEGMEXEC
19280+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19281+ pax_task_size = SEGMEXEC_TASK_SIZE;
19282+#endif
19283+
19284+ if (len > pax_task_size || addr > pax_task_size - len)
19285+ return -EINVAL;
19286+
19287+ return 0;
19288+}
19289+
19290+unsigned long
19291+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19292+ unsigned long len, unsigned long pgoff, unsigned long flags)
19293+{
19294+ struct mm_struct *mm = current->mm;
19295+ struct vm_area_struct *vma;
19296+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19297+
19298+#ifdef CONFIG_PAX_SEGMEXEC
19299+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19300+ pax_task_size = SEGMEXEC_TASK_SIZE;
19301+#endif
19302+
19303+ pax_task_size -= PAGE_SIZE;
19304+
19305+ if (len > pax_task_size)
19306+ return -ENOMEM;
19307+
19308+ if (flags & MAP_FIXED)
19309+ return addr;
19310+
19311+#ifdef CONFIG_PAX_RANDMMAP
19312+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19313+#endif
19314+
19315+ if (addr) {
19316+ addr = PAGE_ALIGN(addr);
19317+ if (pax_task_size - len >= addr) {
19318+ vma = find_vma(mm, addr);
19319+ if (check_heap_stack_gap(vma, addr, len))
19320+ return addr;
19321+ }
19322+ }
19323+ if (len > mm->cached_hole_size) {
19324+ start_addr = addr = mm->free_area_cache;
19325+ } else {
19326+ start_addr = addr = mm->mmap_base;
19327+ mm->cached_hole_size = 0;
19328+ }
19329+
19330+#ifdef CONFIG_PAX_PAGEEXEC
19331+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19332+ start_addr = 0x00110000UL;
19333+
19334+#ifdef CONFIG_PAX_RANDMMAP
19335+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19336+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19337+#endif
19338+
19339+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19340+ start_addr = addr = mm->mmap_base;
19341+ else
19342+ addr = start_addr;
19343+ }
19344+#endif
19345+
19346+full_search:
19347+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19348+ /* At this point: (!vma || addr < vma->vm_end). */
19349+ if (pax_task_size - len < addr) {
19350+ /*
19351+ * Start a new search - just in case we missed
19352+ * some holes.
19353+ */
19354+ if (start_addr != mm->mmap_base) {
19355+ start_addr = addr = mm->mmap_base;
19356+ mm->cached_hole_size = 0;
19357+ goto full_search;
19358+ }
19359+ return -ENOMEM;
19360+ }
19361+ if (check_heap_stack_gap(vma, addr, len))
19362+ break;
19363+ if (addr + mm->cached_hole_size < vma->vm_start)
19364+ mm->cached_hole_size = vma->vm_start - addr;
19365+ addr = vma->vm_end;
19366+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19367+ start_addr = addr = mm->mmap_base;
19368+ mm->cached_hole_size = 0;
19369+ goto full_search;
19370+ }
19371+ }
19372+
19373+ /*
19374+ * Remember the place where we stopped the search:
19375+ */
19376+ mm->free_area_cache = addr + len;
19377+ return addr;
19378+}
19379+
19380+unsigned long
19381+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19382+ const unsigned long len, const unsigned long pgoff,
19383+ const unsigned long flags)
19384+{
19385+ struct vm_area_struct *vma;
19386+ struct mm_struct *mm = current->mm;
19387+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19388+
19389+#ifdef CONFIG_PAX_SEGMEXEC
19390+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19391+ pax_task_size = SEGMEXEC_TASK_SIZE;
19392+#endif
19393+
19394+ pax_task_size -= PAGE_SIZE;
19395+
19396+ /* requested length too big for entire address space */
19397+ if (len > pax_task_size)
19398+ return -ENOMEM;
19399+
19400+ if (flags & MAP_FIXED)
19401+ return addr;
19402+
19403+#ifdef CONFIG_PAX_PAGEEXEC
19404+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19405+ goto bottomup;
19406+#endif
19407+
19408+#ifdef CONFIG_PAX_RANDMMAP
19409+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19410+#endif
19411+
19412+ /* requesting a specific address */
19413+ if (addr) {
19414+ addr = PAGE_ALIGN(addr);
19415+ if (pax_task_size - len >= addr) {
19416+ vma = find_vma(mm, addr);
19417+ if (check_heap_stack_gap(vma, addr, len))
19418+ return addr;
19419+ }
19420+ }
19421+
19422+ /* check if free_area_cache is useful for us */
19423+ if (len <= mm->cached_hole_size) {
19424+ mm->cached_hole_size = 0;
19425+ mm->free_area_cache = mm->mmap_base;
19426+ }
19427+
19428+ /* either no address requested or can't fit in requested address hole */
19429+ addr = mm->free_area_cache;
19430+
19431+ /* make sure it can fit in the remaining address space */
19432+ if (addr > len) {
19433+ vma = find_vma(mm, addr-len);
19434+ if (check_heap_stack_gap(vma, addr - len, len))
19435+ /* remember the address as a hint for next time */
19436+ return (mm->free_area_cache = addr-len);
19437+ }
19438+
19439+ if (mm->mmap_base < len)
19440+ goto bottomup;
19441+
19442+ addr = mm->mmap_base-len;
19443+
19444+ do {
19445+ /*
19446+ * Lookup failure means no vma is above this address,
19447+ * else if new region fits below vma->vm_start,
19448+ * return with success:
19449+ */
19450+ vma = find_vma(mm, addr);
19451+ if (check_heap_stack_gap(vma, addr, len))
19452+ /* remember the address as a hint for next time */
19453+ return (mm->free_area_cache = addr);
19454+
19455+ /* remember the largest hole we saw so far */
19456+ if (addr + mm->cached_hole_size < vma->vm_start)
19457+ mm->cached_hole_size = vma->vm_start - addr;
19458+
19459+ /* try just below the current vma->vm_start */
19460+ addr = skip_heap_stack_gap(vma, len);
19461+ } while (!IS_ERR_VALUE(addr));
19462+
19463+bottomup:
19464+ /*
19465+ * A failed mmap() very likely causes application failure,
19466+ * so fall back to the bottom-up function here. This scenario
19467+ * can happen with large stack limits and large mmap()
19468+ * allocations.
19469+ */
19470+
19471+#ifdef CONFIG_PAX_SEGMEXEC
19472+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19473+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19474+ else
19475+#endif
19476+
19477+ mm->mmap_base = TASK_UNMAPPED_BASE;
19478+
19479+#ifdef CONFIG_PAX_RANDMMAP
19480+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19481+ mm->mmap_base += mm->delta_mmap;
19482+#endif
19483+
19484+ mm->free_area_cache = mm->mmap_base;
19485+ mm->cached_hole_size = ~0UL;
19486+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19487+ /*
19488+ * Restore the topdown base:
19489+ */
19490+ mm->mmap_base = base;
19491+ mm->free_area_cache = base;
19492+ mm->cached_hole_size = ~0UL;
19493+
19494+ return addr;
19495 }
19496diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19497index b4d3c39..82bb73b 100644
19498--- a/arch/x86/kernel/sys_x86_64.c
19499+++ b/arch/x86/kernel/sys_x86_64.c
19500@@ -95,8 +95,8 @@ out:
19501 return error;
19502 }
19503
19504-static void find_start_end(unsigned long flags, unsigned long *begin,
19505- unsigned long *end)
19506+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19507+ unsigned long *begin, unsigned long *end)
19508 {
19509 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
19510 unsigned long new_begin;
19511@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19512 *begin = new_begin;
19513 }
19514 } else {
19515- *begin = TASK_UNMAPPED_BASE;
19516+ *begin = mm->mmap_base;
19517 *end = TASK_SIZE;
19518 }
19519 }
19520@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19521 if (flags & MAP_FIXED)
19522 return addr;
19523
19524- find_start_end(flags, &begin, &end);
19525+ find_start_end(mm, flags, &begin, &end);
19526
19527 if (len > end)
19528 return -ENOMEM;
19529
19530+#ifdef CONFIG_PAX_RANDMMAP
19531+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19532+#endif
19533+
19534 if (addr) {
19535 addr = PAGE_ALIGN(addr);
19536 vma = find_vma(mm, addr);
19537- if (end - len >= addr &&
19538- (!vma || addr + len <= vma->vm_start))
19539+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19540 return addr;
19541 }
19542 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
19543@@ -172,7 +175,7 @@ full_search:
19544 }
19545 return -ENOMEM;
19546 }
19547- if (!vma || addr + len <= vma->vm_start) {
19548+ if (check_heap_stack_gap(vma, addr, len)) {
19549 /*
19550 * Remember the place where we stopped the search:
19551 */
19552@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19553 {
19554 struct vm_area_struct *vma;
19555 struct mm_struct *mm = current->mm;
19556- unsigned long addr = addr0, start_addr;
19557+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
19558
19559 /* requested length too big for entire address space */
19560 if (len > TASK_SIZE)
19561@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19562 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
19563 goto bottomup;
19564
19565+#ifdef CONFIG_PAX_RANDMMAP
19566+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19567+#endif
19568+
19569 /* requesting a specific address */
19570 if (addr) {
19571 addr = PAGE_ALIGN(addr);
19572- vma = find_vma(mm, addr);
19573- if (TASK_SIZE - len >= addr &&
19574- (!vma || addr + len <= vma->vm_start))
19575- return addr;
19576+ if (TASK_SIZE - len >= addr) {
19577+ vma = find_vma(mm, addr);
19578+ if (check_heap_stack_gap(vma, addr, len))
19579+ return addr;
19580+ }
19581 }
19582
19583 /* check if free_area_cache is useful for us */
19584@@ -240,7 +248,7 @@ try_again:
19585 * return with success:
19586 */
19587 vma = find_vma(mm, addr);
19588- if (!vma || addr+len <= vma->vm_start)
19589+ if (check_heap_stack_gap(vma, addr, len))
19590 /* remember the address as a hint for next time */
19591 return mm->free_area_cache = addr;
19592
19593@@ -249,8 +257,8 @@ try_again:
19594 mm->cached_hole_size = vma->vm_start - addr;
19595
19596 /* try just below the current vma->vm_start */
19597- addr = vma->vm_start-len;
19598- } while (len < vma->vm_start);
19599+ addr = skip_heap_stack_gap(vma, len);
19600+ } while (!IS_ERR_VALUE(addr));
19601
19602 fail:
19603 /*
19604@@ -270,13 +278,21 @@ bottomup:
19605 * can happen with large stack limits and large mmap()
19606 * allocations.
19607 */
19608+ mm->mmap_base = TASK_UNMAPPED_BASE;
19609+
19610+#ifdef CONFIG_PAX_RANDMMAP
19611+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19612+ mm->mmap_base += mm->delta_mmap;
19613+#endif
19614+
19615+ mm->free_area_cache = mm->mmap_base;
19616 mm->cached_hole_size = ~0UL;
19617- mm->free_area_cache = TASK_UNMAPPED_BASE;
19618 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19619 /*
19620 * Restore the topdown base:
19621 */
19622- mm->free_area_cache = mm->mmap_base;
19623+ mm->mmap_base = base;
19624+ mm->free_area_cache = base;
19625 mm->cached_hole_size = ~0UL;
19626
19627 return addr;
19628diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19629index f84fe00..93fe08f 100644
19630--- a/arch/x86/kernel/tboot.c
19631+++ b/arch/x86/kernel/tboot.c
19632@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
19633
19634 void tboot_shutdown(u32 shutdown_type)
19635 {
19636- void (*shutdown)(void);
19637+ void (* __noreturn shutdown)(void);
19638
19639 if (!tboot_enabled())
19640 return;
19641@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
19642
19643 switch_to_tboot_pt();
19644
19645- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19646+ shutdown = (void *)tboot->shutdown_entry;
19647 shutdown();
19648
19649 /* should not reach here */
19650@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19651 return 0;
19652 }
19653
19654-static atomic_t ap_wfs_count;
19655+static atomic_unchecked_t ap_wfs_count;
19656
19657 static int tboot_wait_for_aps(int num_aps)
19658 {
19659@@ -324,9 +324,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19660 {
19661 switch (action) {
19662 case CPU_DYING:
19663- atomic_inc(&ap_wfs_count);
19664+ atomic_inc_unchecked(&ap_wfs_count);
19665 if (num_online_cpus() == 1)
19666- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19667+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19668 return NOTIFY_BAD;
19669 break;
19670 }
19671@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
19672
19673 tboot_create_trampoline();
19674
19675- atomic_set(&ap_wfs_count, 0);
19676+ atomic_set_unchecked(&ap_wfs_count, 0);
19677 register_hotcpu_notifier(&tboot_cpu_notifier);
19678
19679 acpi_os_set_prepare_sleep(&tboot_sleep);
19680diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19681index 24d3c91..d06b473 100644
19682--- a/arch/x86/kernel/time.c
19683+++ b/arch/x86/kernel/time.c
19684@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19685 {
19686 unsigned long pc = instruction_pointer(regs);
19687
19688- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19689+ if (!user_mode(regs) && in_lock_functions(pc)) {
19690 #ifdef CONFIG_FRAME_POINTER
19691- return *(unsigned long *)(regs->bp + sizeof(long));
19692+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19693 #else
19694 unsigned long *sp =
19695 (unsigned long *)kernel_stack_pointer(regs);
19696@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19697 * or above a saved flags. Eflags has bits 22-31 zero,
19698 * kernel addresses don't.
19699 */
19700+
19701+#ifdef CONFIG_PAX_KERNEXEC
19702+ return ktla_ktva(sp[0]);
19703+#else
19704 if (sp[0] >> 22)
19705 return sp[0];
19706 if (sp[1] >> 22)
19707 return sp[1];
19708 #endif
19709+
19710+#endif
19711 }
19712 return pc;
19713 }
19714diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19715index 9d9d2f9..ed344e4 100644
19716--- a/arch/x86/kernel/tls.c
19717+++ b/arch/x86/kernel/tls.c
19718@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19719 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19720 return -EINVAL;
19721
19722+#ifdef CONFIG_PAX_SEGMEXEC
19723+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19724+ return -EINVAL;
19725+#endif
19726+
19727 set_tls_desc(p, idx, &info, 1);
19728
19729 return 0;
19730diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19731index 05b31d9..501d3ba 100644
19732--- a/arch/x86/kernel/traps.c
19733+++ b/arch/x86/kernel/traps.c
19734@@ -67,12 +67,6 @@ asmlinkage int system_call(void);
19735
19736 /* Do we ignore FPU interrupts ? */
19737 char ignore_fpu_irq;
19738-
19739-/*
19740- * The IDT has to be page-aligned to simplify the Pentium
19741- * F0 0F bug workaround.
19742- */
19743-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19744 #endif
19745
19746 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19747@@ -105,13 +99,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19748 }
19749
19750 static void __kprobes
19751-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19752+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19753 long error_code, siginfo_t *info)
19754 {
19755 struct task_struct *tsk = current;
19756
19757 #ifdef CONFIG_X86_32
19758- if (regs->flags & X86_VM_MASK) {
19759+ if (v8086_mode(regs)) {
19760 /*
19761 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19762 * On nmi (interrupt 2), do_trap should not be called.
19763@@ -122,7 +116,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19764 }
19765 #endif
19766
19767- if (!user_mode(regs))
19768+ if (!user_mode_novm(regs))
19769 goto kernel_trap;
19770
19771 #ifdef CONFIG_X86_32
19772@@ -145,7 +139,7 @@ trap_signal:
19773 printk_ratelimit()) {
19774 printk(KERN_INFO
19775 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19776- tsk->comm, tsk->pid, str,
19777+ tsk->comm, task_pid_nr(tsk), str,
19778 regs->ip, regs->sp, error_code);
19779 print_vma_addr(" in ", regs->ip);
19780 printk("\n");
19781@@ -162,8 +156,20 @@ kernel_trap:
19782 if (!fixup_exception(regs)) {
19783 tsk->thread.error_code = error_code;
19784 tsk->thread.trap_nr = trapnr;
19785+
19786+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19787+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19788+ str = "PAX: suspicious stack segment fault";
19789+#endif
19790+
19791 die(str, regs, error_code);
19792 }
19793+
19794+#ifdef CONFIG_PAX_REFCOUNT
19795+ if (trapnr == 4)
19796+ pax_report_refcount_overflow(regs);
19797+#endif
19798+
19799 return;
19800
19801 #ifdef CONFIG_X86_32
19802@@ -256,14 +262,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19803 conditional_sti(regs);
19804
19805 #ifdef CONFIG_X86_32
19806- if (regs->flags & X86_VM_MASK)
19807+ if (v8086_mode(regs))
19808 goto gp_in_vm86;
19809 #endif
19810
19811 tsk = current;
19812- if (!user_mode(regs))
19813+ if (!user_mode_novm(regs))
19814 goto gp_in_kernel;
19815
19816+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19817+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19818+ struct mm_struct *mm = tsk->mm;
19819+ unsigned long limit;
19820+
19821+ down_write(&mm->mmap_sem);
19822+ limit = mm->context.user_cs_limit;
19823+ if (limit < TASK_SIZE) {
19824+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19825+ up_write(&mm->mmap_sem);
19826+ return;
19827+ }
19828+ up_write(&mm->mmap_sem);
19829+ }
19830+#endif
19831+
19832 tsk->thread.error_code = error_code;
19833 tsk->thread.trap_nr = X86_TRAP_GP;
19834
19835@@ -296,6 +318,13 @@ gp_in_kernel:
19836 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
19837 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
19838 return;
19839+
19840+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19841+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19842+ die("PAX: suspicious general protection fault", regs, error_code);
19843+ else
19844+#endif
19845+
19846 die("general protection fault", regs, error_code);
19847 }
19848
19849@@ -431,7 +460,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19850 /* It's safe to allow irq's after DR6 has been saved */
19851 preempt_conditional_sti(regs);
19852
19853- if (regs->flags & X86_VM_MASK) {
19854+ if (v8086_mode(regs)) {
19855 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
19856 X86_TRAP_DB);
19857 preempt_conditional_cli(regs);
19858@@ -446,7 +475,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19859 * We already checked v86 mode above, so we can check for kernel mode
19860 * by just checking the CPL of CS.
19861 */
19862- if ((dr6 & DR_STEP) && !user_mode(regs)) {
19863+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19864 tsk->thread.debugreg6 &= ~DR_STEP;
19865 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19866 regs->flags &= ~X86_EFLAGS_TF;
19867@@ -477,7 +506,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19868 return;
19869 conditional_sti(regs);
19870
19871- if (!user_mode_vm(regs))
19872+ if (!user_mode(regs))
19873 {
19874 if (!fixup_exception(regs)) {
19875 task->thread.error_code = error_code;
19876diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
19877index dc4e910..c9dedab 100644
19878--- a/arch/x86/kernel/uprobes.c
19879+++ b/arch/x86/kernel/uprobes.c
19880@@ -606,7 +606,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
19881 int ret = NOTIFY_DONE;
19882
19883 /* We are only interested in userspace traps */
19884- if (regs && !user_mode_vm(regs))
19885+ if (regs && !user_mode(regs))
19886 return NOTIFY_DONE;
19887
19888 switch (val) {
19889diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19890index b9242ba..50c5edd 100644
19891--- a/arch/x86/kernel/verify_cpu.S
19892+++ b/arch/x86/kernel/verify_cpu.S
19893@@ -20,6 +20,7 @@
19894 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19895 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19896 * arch/x86/kernel/head_32.S: processor startup
19897+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19898 *
19899 * verify_cpu, returns the status of longmode and SSE in register %eax.
19900 * 0: Success 1: Failure
19901diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19902index 255f58a..5e91150 100644
19903--- a/arch/x86/kernel/vm86_32.c
19904+++ b/arch/x86/kernel/vm86_32.c
19905@@ -41,6 +41,7 @@
19906 #include <linux/ptrace.h>
19907 #include <linux/audit.h>
19908 #include <linux/stddef.h>
19909+#include <linux/grsecurity.h>
19910
19911 #include <asm/uaccess.h>
19912 #include <asm/io.h>
19913@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19914 do_exit(SIGSEGV);
19915 }
19916
19917- tss = &per_cpu(init_tss, get_cpu());
19918+ tss = init_tss + get_cpu();
19919 current->thread.sp0 = current->thread.saved_sp0;
19920 current->thread.sysenter_cs = __KERNEL_CS;
19921 load_sp0(tss, &current->thread);
19922@@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19923 struct task_struct *tsk;
19924 int tmp, ret = -EPERM;
19925
19926+#ifdef CONFIG_GRKERNSEC_VM86
19927+ if (!capable(CAP_SYS_RAWIO)) {
19928+ gr_handle_vm86();
19929+ goto out;
19930+ }
19931+#endif
19932+
19933 tsk = current;
19934 if (tsk->thread.saved_sp0)
19935 goto out;
19936@@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19937 int tmp, ret;
19938 struct vm86plus_struct __user *v86;
19939
19940+#ifdef CONFIG_GRKERNSEC_VM86
19941+ if (!capable(CAP_SYS_RAWIO)) {
19942+ gr_handle_vm86();
19943+ ret = -EPERM;
19944+ goto out;
19945+ }
19946+#endif
19947+
19948 tsk = current;
19949 switch (cmd) {
19950 case VM86_REQUEST_IRQ:
19951@@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19952 tsk->thread.saved_fs = info->regs32->fs;
19953 tsk->thread.saved_gs = get_user_gs(info->regs32);
19954
19955- tss = &per_cpu(init_tss, get_cpu());
19956+ tss = init_tss + get_cpu();
19957 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19958 if (cpu_has_sep)
19959 tsk->thread.sysenter_cs = 0;
19960@@ -533,7 +549,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19961 goto cannot_handle;
19962 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19963 goto cannot_handle;
19964- intr_ptr = (unsigned long __user *) (i << 2);
19965+ intr_ptr = (__force unsigned long __user *) (i << 2);
19966 if (get_user(segoffs, intr_ptr))
19967 goto cannot_handle;
19968 if ((segoffs >> 16) == BIOSSEG)
19969diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
19970index 22a1530..8fbaaad 100644
19971--- a/arch/x86/kernel/vmlinux.lds.S
19972+++ b/arch/x86/kernel/vmlinux.lds.S
19973@@ -26,6 +26,13 @@
19974 #include <asm/page_types.h>
19975 #include <asm/cache.h>
19976 #include <asm/boot.h>
19977+#include <asm/segment.h>
19978+
19979+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19980+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
19981+#else
19982+#define __KERNEL_TEXT_OFFSET 0
19983+#endif
19984
19985 #undef i386 /* in case the preprocessor is a 32bit one */
19986
19987@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
19988
19989 PHDRS {
19990 text PT_LOAD FLAGS(5); /* R_E */
19991+#ifdef CONFIG_X86_32
19992+ module PT_LOAD FLAGS(5); /* R_E */
19993+#endif
19994+#ifdef CONFIG_XEN
19995+ rodata PT_LOAD FLAGS(5); /* R_E */
19996+#else
19997+ rodata PT_LOAD FLAGS(4); /* R__ */
19998+#endif
19999 data PT_LOAD FLAGS(6); /* RW_ */
20000-#ifdef CONFIG_X86_64
20001+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20002 #ifdef CONFIG_SMP
20003 percpu PT_LOAD FLAGS(6); /* RW_ */
20004 #endif
20005+ text.init PT_LOAD FLAGS(5); /* R_E */
20006+ text.exit PT_LOAD FLAGS(5); /* R_E */
20007 init PT_LOAD FLAGS(7); /* RWE */
20008-#endif
20009 note PT_NOTE FLAGS(0); /* ___ */
20010 }
20011
20012 SECTIONS
20013 {
20014 #ifdef CONFIG_X86_32
20015- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20016- phys_startup_32 = startup_32 - LOAD_OFFSET;
20017+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20018 #else
20019- . = __START_KERNEL;
20020- phys_startup_64 = startup_64 - LOAD_OFFSET;
20021+ . = __START_KERNEL;
20022 #endif
20023
20024 /* Text and read-only data */
20025- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20026- _text = .;
20027+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20028 /* bootstrapping code */
20029+#ifdef CONFIG_X86_32
20030+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20031+#else
20032+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20033+#endif
20034+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20035+ _text = .;
20036 HEAD_TEXT
20037 #ifdef CONFIG_X86_32
20038 . = ALIGN(PAGE_SIZE);
20039@@ -108,13 +128,48 @@ SECTIONS
20040 IRQENTRY_TEXT
20041 *(.fixup)
20042 *(.gnu.warning)
20043- /* End of text section */
20044- _etext = .;
20045 } :text = 0x9090
20046
20047- NOTES :text :note
20048+ . += __KERNEL_TEXT_OFFSET;
20049
20050- EXCEPTION_TABLE(16) :text = 0x9090
20051+#ifdef CONFIG_X86_32
20052+ . = ALIGN(PAGE_SIZE);
20053+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20054+
20055+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20056+ MODULES_EXEC_VADDR = .;
20057+ BYTE(0)
20058+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20059+ . = ALIGN(HPAGE_SIZE) - 1;
20060+ MODULES_EXEC_END = .;
20061+#endif
20062+
20063+ } :module
20064+#endif
20065+
20066+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20067+ /* End of text section */
20068+ BYTE(0)
20069+ _etext = . - __KERNEL_TEXT_OFFSET;
20070+ }
20071+
20072+#ifdef CONFIG_X86_32
20073+ . = ALIGN(PAGE_SIZE);
20074+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20075+ *(.idt)
20076+ . = ALIGN(PAGE_SIZE);
20077+ *(.empty_zero_page)
20078+ *(.initial_pg_fixmap)
20079+ *(.initial_pg_pmd)
20080+ *(.initial_page_table)
20081+ *(.swapper_pg_dir)
20082+ } :rodata
20083+#endif
20084+
20085+ . = ALIGN(PAGE_SIZE);
20086+ NOTES :rodata :note
20087+
20088+ EXCEPTION_TABLE(16) :rodata
20089
20090 #if defined(CONFIG_DEBUG_RODATA)
20091 /* .text should occupy whole number of pages */
20092@@ -126,16 +181,20 @@ SECTIONS
20093
20094 /* Data */
20095 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20096+
20097+#ifdef CONFIG_PAX_KERNEXEC
20098+ . = ALIGN(HPAGE_SIZE);
20099+#else
20100+ . = ALIGN(PAGE_SIZE);
20101+#endif
20102+
20103 /* Start of data section */
20104 _sdata = .;
20105
20106 /* init_task */
20107 INIT_TASK_DATA(THREAD_SIZE)
20108
20109-#ifdef CONFIG_X86_32
20110- /* 32 bit has nosave before _edata */
20111 NOSAVE_DATA
20112-#endif
20113
20114 PAGE_ALIGNED_DATA(PAGE_SIZE)
20115
20116@@ -176,12 +235,19 @@ SECTIONS
20117 #endif /* CONFIG_X86_64 */
20118
20119 /* Init code and data - will be freed after init */
20120- . = ALIGN(PAGE_SIZE);
20121 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20122+ BYTE(0)
20123+
20124+#ifdef CONFIG_PAX_KERNEXEC
20125+ . = ALIGN(HPAGE_SIZE);
20126+#else
20127+ . = ALIGN(PAGE_SIZE);
20128+#endif
20129+
20130 __init_begin = .; /* paired with __init_end */
20131- }
20132+ } :init.begin
20133
20134-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20135+#ifdef CONFIG_SMP
20136 /*
20137 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20138 * output PHDR, so the next output section - .init.text - should
20139@@ -190,12 +256,27 @@ SECTIONS
20140 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20141 #endif
20142
20143- INIT_TEXT_SECTION(PAGE_SIZE)
20144-#ifdef CONFIG_X86_64
20145- :init
20146-#endif
20147+ . = ALIGN(PAGE_SIZE);
20148+ init_begin = .;
20149+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20150+ VMLINUX_SYMBOL(_sinittext) = .;
20151+ INIT_TEXT
20152+ VMLINUX_SYMBOL(_einittext) = .;
20153+ . = ALIGN(PAGE_SIZE);
20154+ } :text.init
20155
20156- INIT_DATA_SECTION(16)
20157+ /*
20158+ * .exit.text is discard at runtime, not link time, to deal with
20159+ * references from .altinstructions and .eh_frame
20160+ */
20161+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20162+ EXIT_TEXT
20163+ . = ALIGN(16);
20164+ } :text.exit
20165+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20166+
20167+ . = ALIGN(PAGE_SIZE);
20168+ INIT_DATA_SECTION(16) :init
20169
20170 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
20171 __x86_cpu_dev_start = .;
20172@@ -257,19 +338,12 @@ SECTIONS
20173 }
20174
20175 . = ALIGN(8);
20176- /*
20177- * .exit.text is discard at runtime, not link time, to deal with
20178- * references from .altinstructions and .eh_frame
20179- */
20180- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20181- EXIT_TEXT
20182- }
20183
20184 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20185 EXIT_DATA
20186 }
20187
20188-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20189+#ifndef CONFIG_SMP
20190 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20191 #endif
20192
20193@@ -288,16 +362,10 @@ SECTIONS
20194 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20195 __smp_locks = .;
20196 *(.smp_locks)
20197- . = ALIGN(PAGE_SIZE);
20198 __smp_locks_end = .;
20199+ . = ALIGN(PAGE_SIZE);
20200 }
20201
20202-#ifdef CONFIG_X86_64
20203- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20204- NOSAVE_DATA
20205- }
20206-#endif
20207-
20208 /* BSS */
20209 . = ALIGN(PAGE_SIZE);
20210 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20211@@ -313,6 +381,7 @@ SECTIONS
20212 __brk_base = .;
20213 . += 64 * 1024; /* 64k alignment slop space */
20214 *(.brk_reservation) /* areas brk users have reserved */
20215+ . = ALIGN(HPAGE_SIZE);
20216 __brk_limit = .;
20217 }
20218
20219@@ -339,13 +408,12 @@ SECTIONS
20220 * for the boot processor.
20221 */
20222 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20223-INIT_PER_CPU(gdt_page);
20224 INIT_PER_CPU(irq_stack_union);
20225
20226 /*
20227 * Build-time check on the image size:
20228 */
20229-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20230+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20231 "kernel image bigger than KERNEL_IMAGE_SIZE");
20232
20233 #ifdef CONFIG_SMP
20234diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20235index 5db36ca..2938af9 100644
20236--- a/arch/x86/kernel/vsyscall_64.c
20237+++ b/arch/x86/kernel/vsyscall_64.c
20238@@ -54,15 +54,13 @@
20239 DEFINE_VVAR(int, vgetcpu_mode);
20240 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
20241
20242-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20243+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20244
20245 static int __init vsyscall_setup(char *str)
20246 {
20247 if (str) {
20248 if (!strcmp("emulate", str))
20249 vsyscall_mode = EMULATE;
20250- else if (!strcmp("native", str))
20251- vsyscall_mode = NATIVE;
20252 else if (!strcmp("none", str))
20253 vsyscall_mode = NONE;
20254 else
20255@@ -309,8 +307,7 @@ done:
20256 return true;
20257
20258 sigsegv:
20259- force_sig(SIGSEGV, current);
20260- return true;
20261+ do_group_exit(SIGKILL);
20262 }
20263
20264 /*
20265@@ -363,10 +360,7 @@ void __init map_vsyscall(void)
20266 extern char __vvar_page;
20267 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20268
20269- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20270- vsyscall_mode == NATIVE
20271- ? PAGE_KERNEL_VSYSCALL
20272- : PAGE_KERNEL_VVAR);
20273+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20274 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20275 (unsigned long)VSYSCALL_START);
20276
20277diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20278index 9796c2f..f686fbf 100644
20279--- a/arch/x86/kernel/x8664_ksyms_64.c
20280+++ b/arch/x86/kernel/x8664_ksyms_64.c
20281@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20282 EXPORT_SYMBOL(copy_user_generic_string);
20283 EXPORT_SYMBOL(copy_user_generic_unrolled);
20284 EXPORT_SYMBOL(__copy_user_nocache);
20285-EXPORT_SYMBOL(_copy_from_user);
20286-EXPORT_SYMBOL(_copy_to_user);
20287
20288 EXPORT_SYMBOL(copy_page);
20289 EXPORT_SYMBOL(clear_page);
20290diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20291index bd18149..2ea0183 100644
20292--- a/arch/x86/kernel/xsave.c
20293+++ b/arch/x86/kernel/xsave.c
20294@@ -129,7 +129,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20295 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20296 return -EINVAL;
20297
20298- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20299+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20300 fx_sw_user->extended_size -
20301 FP_XSTATE_MAGIC2_SIZE));
20302 if (err)
20303@@ -265,7 +265,7 @@ fx_only:
20304 * the other extended state.
20305 */
20306 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20307- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20308+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20309 }
20310
20311 /*
20312@@ -294,7 +294,7 @@ int restore_i387_xstate(void __user *buf)
20313 if (use_xsave())
20314 err = restore_user_xstate(buf);
20315 else
20316- err = fxrstor_checking((__force struct i387_fxsave_struct *)
20317+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20318 buf);
20319 if (unlikely(err)) {
20320 /*
20321diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20322index 7df1c6d..9ea7c79 100644
20323--- a/arch/x86/kvm/cpuid.c
20324+++ b/arch/x86/kvm/cpuid.c
20325@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20326 struct kvm_cpuid2 *cpuid,
20327 struct kvm_cpuid_entry2 __user *entries)
20328 {
20329- int r;
20330+ int r, i;
20331
20332 r = -E2BIG;
20333 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20334 goto out;
20335 r = -EFAULT;
20336- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20337- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20338+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20339 goto out;
20340+ for (i = 0; i < cpuid->nent; ++i) {
20341+ struct kvm_cpuid_entry2 cpuid_entry;
20342+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20343+ goto out;
20344+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
20345+ }
20346 vcpu->arch.cpuid_nent = cpuid->nent;
20347 kvm_apic_set_version(vcpu);
20348 kvm_x86_ops->cpuid_update(vcpu);
20349@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20350 struct kvm_cpuid2 *cpuid,
20351 struct kvm_cpuid_entry2 __user *entries)
20352 {
20353- int r;
20354+ int r, i;
20355
20356 r = -E2BIG;
20357 if (cpuid->nent < vcpu->arch.cpuid_nent)
20358 goto out;
20359 r = -EFAULT;
20360- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20361- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20362+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20363 goto out;
20364+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20365+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20366+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20367+ goto out;
20368+ }
20369 return 0;
20370
20371 out:
20372diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20373index 4837375..2cc9722 100644
20374--- a/arch/x86/kvm/emulate.c
20375+++ b/arch/x86/kvm/emulate.c
20376@@ -256,6 +256,7 @@ struct gprefix {
20377
20378 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20379 do { \
20380+ unsigned long _tmp; \
20381 __asm__ __volatile__ ( \
20382 _PRE_EFLAGS("0", "4", "2") \
20383 _op _suffix " %"_x"3,%1; " \
20384@@ -270,8 +271,6 @@ struct gprefix {
20385 /* Raw emulation: instruction has two explicit operands. */
20386 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20387 do { \
20388- unsigned long _tmp; \
20389- \
20390 switch ((ctxt)->dst.bytes) { \
20391 case 2: \
20392 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20393@@ -287,7 +286,6 @@ struct gprefix {
20394
20395 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20396 do { \
20397- unsigned long _tmp; \
20398 switch ((ctxt)->dst.bytes) { \
20399 case 1: \
20400 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20401diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20402index 93c1574..d6097dc 100644
20403--- a/arch/x86/kvm/lapic.c
20404+++ b/arch/x86/kvm/lapic.c
20405@@ -54,7 +54,7 @@
20406 #define APIC_BUS_CYCLE_NS 1
20407
20408 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20409-#define apic_debug(fmt, arg...)
20410+#define apic_debug(fmt, arg...) do {} while (0)
20411
20412 #define APIC_LVT_NUM 6
20413 /* 14 is the version for Xeon and Pentium 8.4.8*/
20414diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20415index 34f9709..8eca2d5 100644
20416--- a/arch/x86/kvm/paging_tmpl.h
20417+++ b/arch/x86/kvm/paging_tmpl.h
20418@@ -197,7 +197,7 @@ retry_walk:
20419 if (unlikely(kvm_is_error_hva(host_addr)))
20420 goto error;
20421
20422- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20423+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20424 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20425 goto error;
20426
20427diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20428index f75af40..285b18f 100644
20429--- a/arch/x86/kvm/svm.c
20430+++ b/arch/x86/kvm/svm.c
20431@@ -3516,7 +3516,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20432 int cpu = raw_smp_processor_id();
20433
20434 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20435+
20436+ pax_open_kernel();
20437 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20438+ pax_close_kernel();
20439+
20440 load_TR_desc();
20441 }
20442
20443@@ -3894,6 +3898,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20444 #endif
20445 #endif
20446
20447+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20448+ __set_fs(current_thread_info()->addr_limit);
20449+#endif
20450+
20451 reload_tss(vcpu);
20452
20453 local_irq_disable();
20454diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20455index 86c8704..d9277bb 100644
20456--- a/arch/x86/kvm/vmx.c
20457+++ b/arch/x86/kvm/vmx.c
20458@@ -1317,7 +1317,11 @@ static void reload_tss(void)
20459 struct desc_struct *descs;
20460
20461 descs = (void *)gdt->address;
20462+
20463+ pax_open_kernel();
20464 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20465+ pax_close_kernel();
20466+
20467 load_TR_desc();
20468 }
20469
20470@@ -1527,6 +1531,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
20471 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
20472 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
20473
20474+#ifdef CONFIG_PAX_PER_CPU_PGD
20475+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
20476+#endif
20477+
20478 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
20479 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
20480 vmx->loaded_vmcs->cpu = cpu;
20481@@ -2650,8 +2658,11 @@ static __init int hardware_setup(void)
20482 if (!cpu_has_vmx_flexpriority())
20483 flexpriority_enabled = 0;
20484
20485- if (!cpu_has_vmx_tpr_shadow())
20486- kvm_x86_ops->update_cr8_intercept = NULL;
20487+ if (!cpu_has_vmx_tpr_shadow()) {
20488+ pax_open_kernel();
20489+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20490+ pax_close_kernel();
20491+ }
20492
20493 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20494 kvm_disable_largepages();
20495@@ -3697,7 +3708,10 @@ static void vmx_set_constant_host_state(void)
20496
20497 vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
20498 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
20499+
20500+#ifndef CONFIG_PAX_PER_CPU_PGD
20501 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
20502+#endif
20503
20504 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
20505 #ifdef CONFIG_X86_64
20506@@ -3719,7 +3733,7 @@ static void vmx_set_constant_host_state(void)
20507 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20508
20509 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20510- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20511+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20512
20513 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20514 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20515@@ -6257,6 +6271,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20516 "jmp .Lkvm_vmx_return \n\t"
20517 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20518 ".Lkvm_vmx_return: "
20519+
20520+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20521+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20522+ ".Lkvm_vmx_return2: "
20523+#endif
20524+
20525 /* Save guest registers, load host registers, keep flags */
20526 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20527 "pop %0 \n\t"
20528@@ -6305,6 +6325,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20529 #endif
20530 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20531 [wordsize]"i"(sizeof(ulong))
20532+
20533+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20534+ ,[cs]"i"(__KERNEL_CS)
20535+#endif
20536+
20537 : "cc", "memory"
20538 , R"ax", R"bx", R"di", R"si"
20539 #ifdef CONFIG_X86_64
20540@@ -6312,7 +6337,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20541 #endif
20542 );
20543
20544-#ifndef CONFIG_X86_64
20545+#ifdef CONFIG_X86_32
20546 /*
20547 * The sysexit path does not restore ds/es, so we must set them to
20548 * a reasonable value ourselves.
20549@@ -6321,8 +6346,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20550 * may be executed in interrupt context, which saves and restore segments
20551 * around it, nullifying its effect.
20552 */
20553- loadsegment(ds, __USER_DS);
20554- loadsegment(es, __USER_DS);
20555+ loadsegment(ds, __KERNEL_DS);
20556+ loadsegment(es, __KERNEL_DS);
20557+ loadsegment(ss, __KERNEL_DS);
20558+
20559+#ifdef CONFIG_PAX_KERNEXEC
20560+ loadsegment(fs, __KERNEL_PERCPU);
20561+#endif
20562+
20563+#ifdef CONFIG_PAX_MEMORY_UDEREF
20564+ __set_fs(current_thread_info()->addr_limit);
20565+#endif
20566+
20567 #endif
20568
20569 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
20570diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20571index 14c290d..0dae6e5 100644
20572--- a/arch/x86/kvm/x86.c
20573+++ b/arch/x86/kvm/x86.c
20574@@ -1361,8 +1361,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20575 {
20576 struct kvm *kvm = vcpu->kvm;
20577 int lm = is_long_mode(vcpu);
20578- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20579- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20580+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20581+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20582 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20583 : kvm->arch.xen_hvm_config.blob_size_32;
20584 u32 page_num = data & ~PAGE_MASK;
20585@@ -2218,6 +2218,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20586 if (n < msr_list.nmsrs)
20587 goto out;
20588 r = -EFAULT;
20589+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20590+ goto out;
20591 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20592 num_msrs_to_save * sizeof(u32)))
20593 goto out;
20594@@ -2343,7 +2345,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20595 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20596 struct kvm_interrupt *irq)
20597 {
20598- if (irq->irq < 0 || irq->irq >= 256)
20599+ if (irq->irq >= 256)
20600 return -EINVAL;
20601 if (irqchip_in_kernel(vcpu->kvm))
20602 return -ENXIO;
20603@@ -4880,7 +4882,7 @@ static void kvm_set_mmio_spte_mask(void)
20604 kvm_mmu_set_mmio_spte_mask(mask);
20605 }
20606
20607-int kvm_arch_init(void *opaque)
20608+int kvm_arch_init(const void *opaque)
20609 {
20610 int r;
20611 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20612diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20613index 642d880..44e0f3f 100644
20614--- a/arch/x86/lguest/boot.c
20615+++ b/arch/x86/lguest/boot.c
20616@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20617 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20618 * Launcher to reboot us.
20619 */
20620-static void lguest_restart(char *reason)
20621+static __noreturn void lguest_restart(char *reason)
20622 {
20623 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20624+ BUG();
20625 }
20626
20627 /*G:050
20628diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20629index 00933d5..3a64af9 100644
20630--- a/arch/x86/lib/atomic64_386_32.S
20631+++ b/arch/x86/lib/atomic64_386_32.S
20632@@ -48,6 +48,10 @@ BEGIN(read)
20633 movl (v), %eax
20634 movl 4(v), %edx
20635 RET_ENDP
20636+BEGIN(read_unchecked)
20637+ movl (v), %eax
20638+ movl 4(v), %edx
20639+RET_ENDP
20640 #undef v
20641
20642 #define v %esi
20643@@ -55,6 +59,10 @@ BEGIN(set)
20644 movl %ebx, (v)
20645 movl %ecx, 4(v)
20646 RET_ENDP
20647+BEGIN(set_unchecked)
20648+ movl %ebx, (v)
20649+ movl %ecx, 4(v)
20650+RET_ENDP
20651 #undef v
20652
20653 #define v %esi
20654@@ -70,6 +78,20 @@ RET_ENDP
20655 BEGIN(add)
20656 addl %eax, (v)
20657 adcl %edx, 4(v)
20658+
20659+#ifdef CONFIG_PAX_REFCOUNT
20660+ jno 0f
20661+ subl %eax, (v)
20662+ sbbl %edx, 4(v)
20663+ int $4
20664+0:
20665+ _ASM_EXTABLE(0b, 0b)
20666+#endif
20667+
20668+RET_ENDP
20669+BEGIN(add_unchecked)
20670+ addl %eax, (v)
20671+ adcl %edx, 4(v)
20672 RET_ENDP
20673 #undef v
20674
20675@@ -77,6 +99,24 @@ RET_ENDP
20676 BEGIN(add_return)
20677 addl (v), %eax
20678 adcl 4(v), %edx
20679+
20680+#ifdef CONFIG_PAX_REFCOUNT
20681+ into
20682+1234:
20683+ _ASM_EXTABLE(1234b, 2f)
20684+#endif
20685+
20686+ movl %eax, (v)
20687+ movl %edx, 4(v)
20688+
20689+#ifdef CONFIG_PAX_REFCOUNT
20690+2:
20691+#endif
20692+
20693+RET_ENDP
20694+BEGIN(add_return_unchecked)
20695+ addl (v), %eax
20696+ adcl 4(v), %edx
20697 movl %eax, (v)
20698 movl %edx, 4(v)
20699 RET_ENDP
20700@@ -86,6 +126,20 @@ RET_ENDP
20701 BEGIN(sub)
20702 subl %eax, (v)
20703 sbbl %edx, 4(v)
20704+
20705+#ifdef CONFIG_PAX_REFCOUNT
20706+ jno 0f
20707+ addl %eax, (v)
20708+ adcl %edx, 4(v)
20709+ int $4
20710+0:
20711+ _ASM_EXTABLE(0b, 0b)
20712+#endif
20713+
20714+RET_ENDP
20715+BEGIN(sub_unchecked)
20716+ subl %eax, (v)
20717+ sbbl %edx, 4(v)
20718 RET_ENDP
20719 #undef v
20720
20721@@ -96,6 +150,27 @@ BEGIN(sub_return)
20722 sbbl $0, %edx
20723 addl (v), %eax
20724 adcl 4(v), %edx
20725+
20726+#ifdef CONFIG_PAX_REFCOUNT
20727+ into
20728+1234:
20729+ _ASM_EXTABLE(1234b, 2f)
20730+#endif
20731+
20732+ movl %eax, (v)
20733+ movl %edx, 4(v)
20734+
20735+#ifdef CONFIG_PAX_REFCOUNT
20736+2:
20737+#endif
20738+
20739+RET_ENDP
20740+BEGIN(sub_return_unchecked)
20741+ negl %edx
20742+ negl %eax
20743+ sbbl $0, %edx
20744+ addl (v), %eax
20745+ adcl 4(v), %edx
20746 movl %eax, (v)
20747 movl %edx, 4(v)
20748 RET_ENDP
20749@@ -105,6 +180,20 @@ RET_ENDP
20750 BEGIN(inc)
20751 addl $1, (v)
20752 adcl $0, 4(v)
20753+
20754+#ifdef CONFIG_PAX_REFCOUNT
20755+ jno 0f
20756+ subl $1, (v)
20757+ sbbl $0, 4(v)
20758+ int $4
20759+0:
20760+ _ASM_EXTABLE(0b, 0b)
20761+#endif
20762+
20763+RET_ENDP
20764+BEGIN(inc_unchecked)
20765+ addl $1, (v)
20766+ adcl $0, 4(v)
20767 RET_ENDP
20768 #undef v
20769
20770@@ -114,6 +203,26 @@ BEGIN(inc_return)
20771 movl 4(v), %edx
20772 addl $1, %eax
20773 adcl $0, %edx
20774+
20775+#ifdef CONFIG_PAX_REFCOUNT
20776+ into
20777+1234:
20778+ _ASM_EXTABLE(1234b, 2f)
20779+#endif
20780+
20781+ movl %eax, (v)
20782+ movl %edx, 4(v)
20783+
20784+#ifdef CONFIG_PAX_REFCOUNT
20785+2:
20786+#endif
20787+
20788+RET_ENDP
20789+BEGIN(inc_return_unchecked)
20790+ movl (v), %eax
20791+ movl 4(v), %edx
20792+ addl $1, %eax
20793+ adcl $0, %edx
20794 movl %eax, (v)
20795 movl %edx, 4(v)
20796 RET_ENDP
20797@@ -123,6 +232,20 @@ RET_ENDP
20798 BEGIN(dec)
20799 subl $1, (v)
20800 sbbl $0, 4(v)
20801+
20802+#ifdef CONFIG_PAX_REFCOUNT
20803+ jno 0f
20804+ addl $1, (v)
20805+ adcl $0, 4(v)
20806+ int $4
20807+0:
20808+ _ASM_EXTABLE(0b, 0b)
20809+#endif
20810+
20811+RET_ENDP
20812+BEGIN(dec_unchecked)
20813+ subl $1, (v)
20814+ sbbl $0, 4(v)
20815 RET_ENDP
20816 #undef v
20817
20818@@ -132,6 +255,26 @@ BEGIN(dec_return)
20819 movl 4(v), %edx
20820 subl $1, %eax
20821 sbbl $0, %edx
20822+
20823+#ifdef CONFIG_PAX_REFCOUNT
20824+ into
20825+1234:
20826+ _ASM_EXTABLE(1234b, 2f)
20827+#endif
20828+
20829+ movl %eax, (v)
20830+ movl %edx, 4(v)
20831+
20832+#ifdef CONFIG_PAX_REFCOUNT
20833+2:
20834+#endif
20835+
20836+RET_ENDP
20837+BEGIN(dec_return_unchecked)
20838+ movl (v), %eax
20839+ movl 4(v), %edx
20840+ subl $1, %eax
20841+ sbbl $0, %edx
20842 movl %eax, (v)
20843 movl %edx, 4(v)
20844 RET_ENDP
20845@@ -143,6 +286,13 @@ BEGIN(add_unless)
20846 adcl %edx, %edi
20847 addl (v), %eax
20848 adcl 4(v), %edx
20849+
20850+#ifdef CONFIG_PAX_REFCOUNT
20851+ into
20852+1234:
20853+ _ASM_EXTABLE(1234b, 2f)
20854+#endif
20855+
20856 cmpl %eax, %ecx
20857 je 3f
20858 1:
20859@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20860 1:
20861 addl $1, %eax
20862 adcl $0, %edx
20863+
20864+#ifdef CONFIG_PAX_REFCOUNT
20865+ into
20866+1234:
20867+ _ASM_EXTABLE(1234b, 2f)
20868+#endif
20869+
20870 movl %eax, (v)
20871 movl %edx, 4(v)
20872 movl $1, %eax
20873@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20874 movl 4(v), %edx
20875 subl $1, %eax
20876 sbbl $0, %edx
20877+
20878+#ifdef CONFIG_PAX_REFCOUNT
20879+ into
20880+1234:
20881+ _ASM_EXTABLE(1234b, 1f)
20882+#endif
20883+
20884 js 1f
20885 movl %eax, (v)
20886 movl %edx, 4(v)
20887diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20888index f5cc9eb..51fa319 100644
20889--- a/arch/x86/lib/atomic64_cx8_32.S
20890+++ b/arch/x86/lib/atomic64_cx8_32.S
20891@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20892 CFI_STARTPROC
20893
20894 read64 %ecx
20895+ pax_force_retaddr
20896 ret
20897 CFI_ENDPROC
20898 ENDPROC(atomic64_read_cx8)
20899
20900+ENTRY(atomic64_read_unchecked_cx8)
20901+ CFI_STARTPROC
20902+
20903+ read64 %ecx
20904+ pax_force_retaddr
20905+ ret
20906+ CFI_ENDPROC
20907+ENDPROC(atomic64_read_unchecked_cx8)
20908+
20909 ENTRY(atomic64_set_cx8)
20910 CFI_STARTPROC
20911
20912@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20913 cmpxchg8b (%esi)
20914 jne 1b
20915
20916+ pax_force_retaddr
20917 ret
20918 CFI_ENDPROC
20919 ENDPROC(atomic64_set_cx8)
20920
20921+ENTRY(atomic64_set_unchecked_cx8)
20922+ CFI_STARTPROC
20923+
20924+1:
20925+/* we don't need LOCK_PREFIX since aligned 64-bit writes
20926+ * are atomic on 586 and newer */
20927+ cmpxchg8b (%esi)
20928+ jne 1b
20929+
20930+ pax_force_retaddr
20931+ ret
20932+ CFI_ENDPROC
20933+ENDPROC(atomic64_set_unchecked_cx8)
20934+
20935 ENTRY(atomic64_xchg_cx8)
20936 CFI_STARTPROC
20937
20938@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
20939 cmpxchg8b (%esi)
20940 jne 1b
20941
20942+ pax_force_retaddr
20943 ret
20944 CFI_ENDPROC
20945 ENDPROC(atomic64_xchg_cx8)
20946
20947-.macro addsub_return func ins insc
20948-ENTRY(atomic64_\func\()_return_cx8)
20949+.macro addsub_return func ins insc unchecked=""
20950+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20951 CFI_STARTPROC
20952 SAVE ebp
20953 SAVE ebx
20954@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
20955 movl %edx, %ecx
20956 \ins\()l %esi, %ebx
20957 \insc\()l %edi, %ecx
20958+
20959+.ifb \unchecked
20960+#ifdef CONFIG_PAX_REFCOUNT
20961+ into
20962+2:
20963+ _ASM_EXTABLE(2b, 3f)
20964+#endif
20965+.endif
20966+
20967 LOCK_PREFIX
20968 cmpxchg8b (%ebp)
20969 jne 1b
20970-
20971-10:
20972 movl %ebx, %eax
20973 movl %ecx, %edx
20974+
20975+.ifb \unchecked
20976+#ifdef CONFIG_PAX_REFCOUNT
20977+3:
20978+#endif
20979+.endif
20980+
20981 RESTORE edi
20982 RESTORE esi
20983 RESTORE ebx
20984 RESTORE ebp
20985+ pax_force_retaddr
20986 ret
20987 CFI_ENDPROC
20988-ENDPROC(atomic64_\func\()_return_cx8)
20989+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20990 .endm
20991
20992 addsub_return add add adc
20993 addsub_return sub sub sbb
20994+addsub_return add add adc _unchecked
20995+addsub_return sub sub sbb _unchecked
20996
20997-.macro incdec_return func ins insc
20998-ENTRY(atomic64_\func\()_return_cx8)
20999+.macro incdec_return func ins insc unchecked=""
21000+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21001 CFI_STARTPROC
21002 SAVE ebx
21003
21004@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
21005 movl %edx, %ecx
21006 \ins\()l $1, %ebx
21007 \insc\()l $0, %ecx
21008+
21009+.ifb \unchecked
21010+#ifdef CONFIG_PAX_REFCOUNT
21011+ into
21012+2:
21013+ _ASM_EXTABLE(2b, 3f)
21014+#endif
21015+.endif
21016+
21017 LOCK_PREFIX
21018 cmpxchg8b (%esi)
21019 jne 1b
21020
21021-10:
21022 movl %ebx, %eax
21023 movl %ecx, %edx
21024+
21025+.ifb \unchecked
21026+#ifdef CONFIG_PAX_REFCOUNT
21027+3:
21028+#endif
21029+.endif
21030+
21031 RESTORE ebx
21032+ pax_force_retaddr
21033 ret
21034 CFI_ENDPROC
21035-ENDPROC(atomic64_\func\()_return_cx8)
21036+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21037 .endm
21038
21039 incdec_return inc add adc
21040 incdec_return dec sub sbb
21041+incdec_return inc add adc _unchecked
21042+incdec_return dec sub sbb _unchecked
21043
21044 ENTRY(atomic64_dec_if_positive_cx8)
21045 CFI_STARTPROC
21046@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21047 movl %edx, %ecx
21048 subl $1, %ebx
21049 sbb $0, %ecx
21050+
21051+#ifdef CONFIG_PAX_REFCOUNT
21052+ into
21053+1234:
21054+ _ASM_EXTABLE(1234b, 2f)
21055+#endif
21056+
21057 js 2f
21058 LOCK_PREFIX
21059 cmpxchg8b (%esi)
21060@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21061 movl %ebx, %eax
21062 movl %ecx, %edx
21063 RESTORE ebx
21064+ pax_force_retaddr
21065 ret
21066 CFI_ENDPROC
21067 ENDPROC(atomic64_dec_if_positive_cx8)
21068@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
21069 movl %edx, %ecx
21070 addl %ebp, %ebx
21071 adcl %edi, %ecx
21072+
21073+#ifdef CONFIG_PAX_REFCOUNT
21074+ into
21075+1234:
21076+ _ASM_EXTABLE(1234b, 3f)
21077+#endif
21078+
21079 LOCK_PREFIX
21080 cmpxchg8b (%esi)
21081 jne 1b
21082@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
21083 CFI_ADJUST_CFA_OFFSET -8
21084 RESTORE ebx
21085 RESTORE ebp
21086+ pax_force_retaddr
21087 ret
21088 4:
21089 cmpl %edx, 4(%esp)
21090@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21091 xorl %ecx, %ecx
21092 addl $1, %ebx
21093 adcl %edx, %ecx
21094+
21095+#ifdef CONFIG_PAX_REFCOUNT
21096+ into
21097+1234:
21098+ _ASM_EXTABLE(1234b, 3f)
21099+#endif
21100+
21101 LOCK_PREFIX
21102 cmpxchg8b (%esi)
21103 jne 1b
21104@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21105 movl $1, %eax
21106 3:
21107 RESTORE ebx
21108+ pax_force_retaddr
21109 ret
21110 CFI_ENDPROC
21111 ENDPROC(atomic64_inc_not_zero_cx8)
21112diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21113index 2af5df3..62b1a5a 100644
21114--- a/arch/x86/lib/checksum_32.S
21115+++ b/arch/x86/lib/checksum_32.S
21116@@ -29,7 +29,8 @@
21117 #include <asm/dwarf2.h>
21118 #include <asm/errno.h>
21119 #include <asm/asm.h>
21120-
21121+#include <asm/segment.h>
21122+
21123 /*
21124 * computes a partial checksum, e.g. for TCP/UDP fragments
21125 */
21126@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21127
21128 #define ARGBASE 16
21129 #define FP 12
21130-
21131-ENTRY(csum_partial_copy_generic)
21132+
21133+ENTRY(csum_partial_copy_generic_to_user)
21134 CFI_STARTPROC
21135+
21136+#ifdef CONFIG_PAX_MEMORY_UDEREF
21137+ pushl_cfi %gs
21138+ popl_cfi %es
21139+ jmp csum_partial_copy_generic
21140+#endif
21141+
21142+ENTRY(csum_partial_copy_generic_from_user)
21143+
21144+#ifdef CONFIG_PAX_MEMORY_UDEREF
21145+ pushl_cfi %gs
21146+ popl_cfi %ds
21147+#endif
21148+
21149+ENTRY(csum_partial_copy_generic)
21150 subl $4,%esp
21151 CFI_ADJUST_CFA_OFFSET 4
21152 pushl_cfi %edi
21153@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
21154 jmp 4f
21155 SRC(1: movw (%esi), %bx )
21156 addl $2, %esi
21157-DST( movw %bx, (%edi) )
21158+DST( movw %bx, %es:(%edi) )
21159 addl $2, %edi
21160 addw %bx, %ax
21161 adcl $0, %eax
21162@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
21163 SRC(1: movl (%esi), %ebx )
21164 SRC( movl 4(%esi), %edx )
21165 adcl %ebx, %eax
21166-DST( movl %ebx, (%edi) )
21167+DST( movl %ebx, %es:(%edi) )
21168 adcl %edx, %eax
21169-DST( movl %edx, 4(%edi) )
21170+DST( movl %edx, %es:4(%edi) )
21171
21172 SRC( movl 8(%esi), %ebx )
21173 SRC( movl 12(%esi), %edx )
21174 adcl %ebx, %eax
21175-DST( movl %ebx, 8(%edi) )
21176+DST( movl %ebx, %es:8(%edi) )
21177 adcl %edx, %eax
21178-DST( movl %edx, 12(%edi) )
21179+DST( movl %edx, %es:12(%edi) )
21180
21181 SRC( movl 16(%esi), %ebx )
21182 SRC( movl 20(%esi), %edx )
21183 adcl %ebx, %eax
21184-DST( movl %ebx, 16(%edi) )
21185+DST( movl %ebx, %es:16(%edi) )
21186 adcl %edx, %eax
21187-DST( movl %edx, 20(%edi) )
21188+DST( movl %edx, %es:20(%edi) )
21189
21190 SRC( movl 24(%esi), %ebx )
21191 SRC( movl 28(%esi), %edx )
21192 adcl %ebx, %eax
21193-DST( movl %ebx, 24(%edi) )
21194+DST( movl %ebx, %es:24(%edi) )
21195 adcl %edx, %eax
21196-DST( movl %edx, 28(%edi) )
21197+DST( movl %edx, %es:28(%edi) )
21198
21199 lea 32(%esi), %esi
21200 lea 32(%edi), %edi
21201@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
21202 shrl $2, %edx # This clears CF
21203 SRC(3: movl (%esi), %ebx )
21204 adcl %ebx, %eax
21205-DST( movl %ebx, (%edi) )
21206+DST( movl %ebx, %es:(%edi) )
21207 lea 4(%esi), %esi
21208 lea 4(%edi), %edi
21209 dec %edx
21210@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
21211 jb 5f
21212 SRC( movw (%esi), %cx )
21213 leal 2(%esi), %esi
21214-DST( movw %cx, (%edi) )
21215+DST( movw %cx, %es:(%edi) )
21216 leal 2(%edi), %edi
21217 je 6f
21218 shll $16,%ecx
21219 SRC(5: movb (%esi), %cl )
21220-DST( movb %cl, (%edi) )
21221+DST( movb %cl, %es:(%edi) )
21222 6: addl %ecx, %eax
21223 adcl $0, %eax
21224 7:
21225@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
21226
21227 6001:
21228 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21229- movl $-EFAULT, (%ebx)
21230+ movl $-EFAULT, %ss:(%ebx)
21231
21232 # zero the complete destination - computing the rest
21233 # is too much work
21234@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
21235
21236 6002:
21237 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21238- movl $-EFAULT,(%ebx)
21239+ movl $-EFAULT,%ss:(%ebx)
21240 jmp 5000b
21241
21242 .previous
21243
21244+ pushl_cfi %ss
21245+ popl_cfi %ds
21246+ pushl_cfi %ss
21247+ popl_cfi %es
21248 popl_cfi %ebx
21249 CFI_RESTORE ebx
21250 popl_cfi %esi
21251@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
21252 popl_cfi %ecx # equivalent to addl $4,%esp
21253 ret
21254 CFI_ENDPROC
21255-ENDPROC(csum_partial_copy_generic)
21256+ENDPROC(csum_partial_copy_generic_to_user)
21257
21258 #else
21259
21260 /* Version for PentiumII/PPro */
21261
21262 #define ROUND1(x) \
21263+ nop; nop; nop; \
21264 SRC(movl x(%esi), %ebx ) ; \
21265 addl %ebx, %eax ; \
21266- DST(movl %ebx, x(%edi) ) ;
21267+ DST(movl %ebx, %es:x(%edi)) ;
21268
21269 #define ROUND(x) \
21270+ nop; nop; nop; \
21271 SRC(movl x(%esi), %ebx ) ; \
21272 adcl %ebx, %eax ; \
21273- DST(movl %ebx, x(%edi) ) ;
21274+ DST(movl %ebx, %es:x(%edi)) ;
21275
21276 #define ARGBASE 12
21277-
21278-ENTRY(csum_partial_copy_generic)
21279+
21280+ENTRY(csum_partial_copy_generic_to_user)
21281 CFI_STARTPROC
21282+
21283+#ifdef CONFIG_PAX_MEMORY_UDEREF
21284+ pushl_cfi %gs
21285+ popl_cfi %es
21286+ jmp csum_partial_copy_generic
21287+#endif
21288+
21289+ENTRY(csum_partial_copy_generic_from_user)
21290+
21291+#ifdef CONFIG_PAX_MEMORY_UDEREF
21292+ pushl_cfi %gs
21293+ popl_cfi %ds
21294+#endif
21295+
21296+ENTRY(csum_partial_copy_generic)
21297 pushl_cfi %ebx
21298 CFI_REL_OFFSET ebx, 0
21299 pushl_cfi %edi
21300@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
21301 subl %ebx, %edi
21302 lea -1(%esi),%edx
21303 andl $-32,%edx
21304- lea 3f(%ebx,%ebx), %ebx
21305+ lea 3f(%ebx,%ebx,2), %ebx
21306 testl %esi, %esi
21307 jmp *%ebx
21308 1: addl $64,%esi
21309@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
21310 jb 5f
21311 SRC( movw (%esi), %dx )
21312 leal 2(%esi), %esi
21313-DST( movw %dx, (%edi) )
21314+DST( movw %dx, %es:(%edi) )
21315 leal 2(%edi), %edi
21316 je 6f
21317 shll $16,%edx
21318 5:
21319 SRC( movb (%esi), %dl )
21320-DST( movb %dl, (%edi) )
21321+DST( movb %dl, %es:(%edi) )
21322 6: addl %edx, %eax
21323 adcl $0, %eax
21324 7:
21325 .section .fixup, "ax"
21326 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21327- movl $-EFAULT, (%ebx)
21328+ movl $-EFAULT, %ss:(%ebx)
21329 # zero the complete destination (computing the rest is too much work)
21330 movl ARGBASE+8(%esp),%edi # dst
21331 movl ARGBASE+12(%esp),%ecx # len
21332@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
21333 rep; stosb
21334 jmp 7b
21335 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21336- movl $-EFAULT, (%ebx)
21337+ movl $-EFAULT, %ss:(%ebx)
21338 jmp 7b
21339 .previous
21340
21341+#ifdef CONFIG_PAX_MEMORY_UDEREF
21342+ pushl_cfi %ss
21343+ popl_cfi %ds
21344+ pushl_cfi %ss
21345+ popl_cfi %es
21346+#endif
21347+
21348 popl_cfi %esi
21349 CFI_RESTORE esi
21350 popl_cfi %edi
21351@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
21352 CFI_RESTORE ebx
21353 ret
21354 CFI_ENDPROC
21355-ENDPROC(csum_partial_copy_generic)
21356+ENDPROC(csum_partial_copy_generic_to_user)
21357
21358 #undef ROUND
21359 #undef ROUND1
21360diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21361index f2145cf..cea889d 100644
21362--- a/arch/x86/lib/clear_page_64.S
21363+++ b/arch/x86/lib/clear_page_64.S
21364@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21365 movl $4096/8,%ecx
21366 xorl %eax,%eax
21367 rep stosq
21368+ pax_force_retaddr
21369 ret
21370 CFI_ENDPROC
21371 ENDPROC(clear_page_c)
21372@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21373 movl $4096,%ecx
21374 xorl %eax,%eax
21375 rep stosb
21376+ pax_force_retaddr
21377 ret
21378 CFI_ENDPROC
21379 ENDPROC(clear_page_c_e)
21380@@ -43,6 +45,7 @@ ENTRY(clear_page)
21381 leaq 64(%rdi),%rdi
21382 jnz .Lloop
21383 nop
21384+ pax_force_retaddr
21385 ret
21386 CFI_ENDPROC
21387 .Lclear_page_end:
21388@@ -58,7 +61,7 @@ ENDPROC(clear_page)
21389
21390 #include <asm/cpufeature.h>
21391
21392- .section .altinstr_replacement,"ax"
21393+ .section .altinstr_replacement,"a"
21394 1: .byte 0xeb /* jmp <disp8> */
21395 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21396 2: .byte 0xeb /* jmp <disp8> */
21397diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21398index 1e572c5..2a162cd 100644
21399--- a/arch/x86/lib/cmpxchg16b_emu.S
21400+++ b/arch/x86/lib/cmpxchg16b_emu.S
21401@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21402
21403 popf
21404 mov $1, %al
21405+ pax_force_retaddr
21406 ret
21407
21408 not_same:
21409 popf
21410 xor %al,%al
21411+ pax_force_retaddr
21412 ret
21413
21414 CFI_ENDPROC
21415diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21416index 6b34d04..dccb07f 100644
21417--- a/arch/x86/lib/copy_page_64.S
21418+++ b/arch/x86/lib/copy_page_64.S
21419@@ -9,6 +9,7 @@ copy_page_c:
21420 CFI_STARTPROC
21421 movl $4096/8,%ecx
21422 rep movsq
21423+ pax_force_retaddr
21424 ret
21425 CFI_ENDPROC
21426 ENDPROC(copy_page_c)
21427@@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
21428
21429 ENTRY(copy_page)
21430 CFI_STARTPROC
21431- subq $2*8,%rsp
21432- CFI_ADJUST_CFA_OFFSET 2*8
21433+ subq $3*8,%rsp
21434+ CFI_ADJUST_CFA_OFFSET 3*8
21435 movq %rbx,(%rsp)
21436 CFI_REL_OFFSET rbx, 0
21437 movq %r12,1*8(%rsp)
21438 CFI_REL_OFFSET r12, 1*8
21439+ movq %r13,2*8(%rsp)
21440+ CFI_REL_OFFSET r13, 2*8
21441
21442 movl $(4096/64)-5,%ecx
21443 .p2align 4
21444@@ -37,7 +40,7 @@ ENTRY(copy_page)
21445 movq 16 (%rsi), %rdx
21446 movq 24 (%rsi), %r8
21447 movq 32 (%rsi), %r9
21448- movq 40 (%rsi), %r10
21449+ movq 40 (%rsi), %r13
21450 movq 48 (%rsi), %r11
21451 movq 56 (%rsi), %r12
21452
21453@@ -48,7 +51,7 @@ ENTRY(copy_page)
21454 movq %rdx, 16 (%rdi)
21455 movq %r8, 24 (%rdi)
21456 movq %r9, 32 (%rdi)
21457- movq %r10, 40 (%rdi)
21458+ movq %r13, 40 (%rdi)
21459 movq %r11, 48 (%rdi)
21460 movq %r12, 56 (%rdi)
21461
21462@@ -67,7 +70,7 @@ ENTRY(copy_page)
21463 movq 16 (%rsi), %rdx
21464 movq 24 (%rsi), %r8
21465 movq 32 (%rsi), %r9
21466- movq 40 (%rsi), %r10
21467+ movq 40 (%rsi), %r13
21468 movq 48 (%rsi), %r11
21469 movq 56 (%rsi), %r12
21470
21471@@ -76,7 +79,7 @@ ENTRY(copy_page)
21472 movq %rdx, 16 (%rdi)
21473 movq %r8, 24 (%rdi)
21474 movq %r9, 32 (%rdi)
21475- movq %r10, 40 (%rdi)
21476+ movq %r13, 40 (%rdi)
21477 movq %r11, 48 (%rdi)
21478 movq %r12, 56 (%rdi)
21479
21480@@ -89,8 +92,11 @@ ENTRY(copy_page)
21481 CFI_RESTORE rbx
21482 movq 1*8(%rsp),%r12
21483 CFI_RESTORE r12
21484- addq $2*8,%rsp
21485- CFI_ADJUST_CFA_OFFSET -2*8
21486+ movq 2*8(%rsp),%r13
21487+ CFI_RESTORE r13
21488+ addq $3*8,%rsp
21489+ CFI_ADJUST_CFA_OFFSET -3*8
21490+ pax_force_retaddr
21491 ret
21492 .Lcopy_page_end:
21493 CFI_ENDPROC
21494@@ -101,7 +107,7 @@ ENDPROC(copy_page)
21495
21496 #include <asm/cpufeature.h>
21497
21498- .section .altinstr_replacement,"ax"
21499+ .section .altinstr_replacement,"a"
21500 1: .byte 0xeb /* jmp <disp8> */
21501 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21502 2:
21503diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21504index 5b2995f..78e7644 100644
21505--- a/arch/x86/lib/copy_user_64.S
21506+++ b/arch/x86/lib/copy_user_64.S
21507@@ -17,6 +17,7 @@
21508 #include <asm/cpufeature.h>
21509 #include <asm/alternative-asm.h>
21510 #include <asm/asm.h>
21511+#include <asm/pgtable.h>
21512
21513 /*
21514 * By placing feature2 after feature1 in altinstructions section, we logically
21515@@ -30,7 +31,7 @@
21516 .byte 0xe9 /* 32bit jump */
21517 .long \orig-1f /* by default jump to orig */
21518 1:
21519- .section .altinstr_replacement,"ax"
21520+ .section .altinstr_replacement,"a"
21521 2: .byte 0xe9 /* near jump with 32bit immediate */
21522 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21523 3: .byte 0xe9 /* near jump with 32bit immediate */
21524@@ -69,47 +70,20 @@
21525 #endif
21526 .endm
21527
21528-/* Standard copy_to_user with segment limit checking */
21529-ENTRY(_copy_to_user)
21530- CFI_STARTPROC
21531- GET_THREAD_INFO(%rax)
21532- movq %rdi,%rcx
21533- addq %rdx,%rcx
21534- jc bad_to_user
21535- cmpq TI_addr_limit(%rax),%rcx
21536- ja bad_to_user
21537- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21538- copy_user_generic_unrolled,copy_user_generic_string, \
21539- copy_user_enhanced_fast_string
21540- CFI_ENDPROC
21541-ENDPROC(_copy_to_user)
21542-
21543-/* Standard copy_from_user with segment limit checking */
21544-ENTRY(_copy_from_user)
21545- CFI_STARTPROC
21546- GET_THREAD_INFO(%rax)
21547- movq %rsi,%rcx
21548- addq %rdx,%rcx
21549- jc bad_from_user
21550- cmpq TI_addr_limit(%rax),%rcx
21551- ja bad_from_user
21552- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21553- copy_user_generic_unrolled,copy_user_generic_string, \
21554- copy_user_enhanced_fast_string
21555- CFI_ENDPROC
21556-ENDPROC(_copy_from_user)
21557-
21558 .section .fixup,"ax"
21559 /* must zero dest */
21560 ENTRY(bad_from_user)
21561 bad_from_user:
21562 CFI_STARTPROC
21563+ testl %edx,%edx
21564+ js bad_to_user
21565 movl %edx,%ecx
21566 xorl %eax,%eax
21567 rep
21568 stosb
21569 bad_to_user:
21570 movl %edx,%eax
21571+ pax_force_retaddr
21572 ret
21573 CFI_ENDPROC
21574 ENDPROC(bad_from_user)
21575@@ -139,19 +113,19 @@ ENTRY(copy_user_generic_unrolled)
21576 jz 17f
21577 1: movq (%rsi),%r8
21578 2: movq 1*8(%rsi),%r9
21579-3: movq 2*8(%rsi),%r10
21580+3: movq 2*8(%rsi),%rax
21581 4: movq 3*8(%rsi),%r11
21582 5: movq %r8,(%rdi)
21583 6: movq %r9,1*8(%rdi)
21584-7: movq %r10,2*8(%rdi)
21585+7: movq %rax,2*8(%rdi)
21586 8: movq %r11,3*8(%rdi)
21587 9: movq 4*8(%rsi),%r8
21588 10: movq 5*8(%rsi),%r9
21589-11: movq 6*8(%rsi),%r10
21590+11: movq 6*8(%rsi),%rax
21591 12: movq 7*8(%rsi),%r11
21592 13: movq %r8,4*8(%rdi)
21593 14: movq %r9,5*8(%rdi)
21594-15: movq %r10,6*8(%rdi)
21595+15: movq %rax,6*8(%rdi)
21596 16: movq %r11,7*8(%rdi)
21597 leaq 64(%rsi),%rsi
21598 leaq 64(%rdi),%rdi
21599@@ -177,6 +151,7 @@ ENTRY(copy_user_generic_unrolled)
21600 decl %ecx
21601 jnz 21b
21602 23: xor %eax,%eax
21603+ pax_force_retaddr
21604 ret
21605
21606 .section .fixup,"ax"
21607@@ -246,6 +221,7 @@ ENTRY(copy_user_generic_string)
21608 3: rep
21609 movsb
21610 4: xorl %eax,%eax
21611+ pax_force_retaddr
21612 ret
21613
21614 .section .fixup,"ax"
21615@@ -279,6 +255,7 @@ ENTRY(copy_user_enhanced_fast_string)
21616 1: rep
21617 movsb
21618 2: xorl %eax,%eax
21619+ pax_force_retaddr
21620 ret
21621
21622 .section .fixup,"ax"
21623diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21624index cacddc7..09d49e4 100644
21625--- a/arch/x86/lib/copy_user_nocache_64.S
21626+++ b/arch/x86/lib/copy_user_nocache_64.S
21627@@ -8,6 +8,7 @@
21628
21629 #include <linux/linkage.h>
21630 #include <asm/dwarf2.h>
21631+#include <asm/alternative-asm.h>
21632
21633 #define FIX_ALIGNMENT 1
21634
21635@@ -15,6 +16,7 @@
21636 #include <asm/asm-offsets.h>
21637 #include <asm/thread_info.h>
21638 #include <asm/asm.h>
21639+#include <asm/pgtable.h>
21640
21641 .macro ALIGN_DESTINATION
21642 #ifdef FIX_ALIGNMENT
21643@@ -48,6 +50,15 @@
21644 */
21645 ENTRY(__copy_user_nocache)
21646 CFI_STARTPROC
21647+
21648+#ifdef CONFIG_PAX_MEMORY_UDEREF
21649+ mov $PAX_USER_SHADOW_BASE,%rcx
21650+ cmp %rcx,%rsi
21651+ jae 1f
21652+ add %rcx,%rsi
21653+1:
21654+#endif
21655+
21656 cmpl $8,%edx
21657 jb 20f /* less then 8 bytes, go to byte copy loop */
21658 ALIGN_DESTINATION
21659@@ -57,19 +68,19 @@ ENTRY(__copy_user_nocache)
21660 jz 17f
21661 1: movq (%rsi),%r8
21662 2: movq 1*8(%rsi),%r9
21663-3: movq 2*8(%rsi),%r10
21664+3: movq 2*8(%rsi),%rax
21665 4: movq 3*8(%rsi),%r11
21666 5: movnti %r8,(%rdi)
21667 6: movnti %r9,1*8(%rdi)
21668-7: movnti %r10,2*8(%rdi)
21669+7: movnti %rax,2*8(%rdi)
21670 8: movnti %r11,3*8(%rdi)
21671 9: movq 4*8(%rsi),%r8
21672 10: movq 5*8(%rsi),%r9
21673-11: movq 6*8(%rsi),%r10
21674+11: movq 6*8(%rsi),%rax
21675 12: movq 7*8(%rsi),%r11
21676 13: movnti %r8,4*8(%rdi)
21677 14: movnti %r9,5*8(%rdi)
21678-15: movnti %r10,6*8(%rdi)
21679+15: movnti %rax,6*8(%rdi)
21680 16: movnti %r11,7*8(%rdi)
21681 leaq 64(%rsi),%rsi
21682 leaq 64(%rdi),%rdi
21683@@ -96,6 +107,7 @@ ENTRY(__copy_user_nocache)
21684 jnz 21b
21685 23: xorl %eax,%eax
21686 sfence
21687+ pax_force_retaddr
21688 ret
21689
21690 .section .fixup,"ax"
21691diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21692index 2419d5f..953ee51 100644
21693--- a/arch/x86/lib/csum-copy_64.S
21694+++ b/arch/x86/lib/csum-copy_64.S
21695@@ -9,6 +9,7 @@
21696 #include <asm/dwarf2.h>
21697 #include <asm/errno.h>
21698 #include <asm/asm.h>
21699+#include <asm/alternative-asm.h>
21700
21701 /*
21702 * Checksum copy with exception handling.
21703@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
21704 CFI_RESTORE rbp
21705 addq $7*8, %rsp
21706 CFI_ADJUST_CFA_OFFSET -7*8
21707+ pax_force_retaddr 0, 1
21708 ret
21709 CFI_RESTORE_STATE
21710
21711diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21712index 25b7ae8..3b52ccd 100644
21713--- a/arch/x86/lib/csum-wrappers_64.c
21714+++ b/arch/x86/lib/csum-wrappers_64.c
21715@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21716 len -= 2;
21717 }
21718 }
21719- isum = csum_partial_copy_generic((__force const void *)src,
21720+
21721+#ifdef CONFIG_PAX_MEMORY_UDEREF
21722+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21723+ src += PAX_USER_SHADOW_BASE;
21724+#endif
21725+
21726+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
21727 dst, len, isum, errp, NULL);
21728 if (unlikely(*errp))
21729 goto out_err;
21730@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21731 }
21732
21733 *errp = 0;
21734- return csum_partial_copy_generic(src, (void __force *)dst,
21735+
21736+#ifdef CONFIG_PAX_MEMORY_UDEREF
21737+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21738+ dst += PAX_USER_SHADOW_BASE;
21739+#endif
21740+
21741+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21742 len, isum, NULL, errp);
21743 }
21744 EXPORT_SYMBOL(csum_partial_copy_to_user);
21745diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21746index b33b1fb..219f389 100644
21747--- a/arch/x86/lib/getuser.S
21748+++ b/arch/x86/lib/getuser.S
21749@@ -33,15 +33,38 @@
21750 #include <asm/asm-offsets.h>
21751 #include <asm/thread_info.h>
21752 #include <asm/asm.h>
21753+#include <asm/segment.h>
21754+#include <asm/pgtable.h>
21755+#include <asm/alternative-asm.h>
21756+
21757+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21758+#define __copyuser_seg gs;
21759+#else
21760+#define __copyuser_seg
21761+#endif
21762
21763 .text
21764 ENTRY(__get_user_1)
21765 CFI_STARTPROC
21766+
21767+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21768 GET_THREAD_INFO(%_ASM_DX)
21769 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21770 jae bad_get_user
21771-1: movzb (%_ASM_AX),%edx
21772+
21773+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21774+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21775+ cmp %_ASM_DX,%_ASM_AX
21776+ jae 1234f
21777+ add %_ASM_DX,%_ASM_AX
21778+1234:
21779+#endif
21780+
21781+#endif
21782+
21783+1: __copyuser_seg movzb (%_ASM_AX),%edx
21784 xor %eax,%eax
21785+ pax_force_retaddr
21786 ret
21787 CFI_ENDPROC
21788 ENDPROC(__get_user_1)
21789@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21790 ENTRY(__get_user_2)
21791 CFI_STARTPROC
21792 add $1,%_ASM_AX
21793+
21794+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21795 jc bad_get_user
21796 GET_THREAD_INFO(%_ASM_DX)
21797 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21798 jae bad_get_user
21799-2: movzwl -1(%_ASM_AX),%edx
21800+
21801+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21802+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21803+ cmp %_ASM_DX,%_ASM_AX
21804+ jae 1234f
21805+ add %_ASM_DX,%_ASM_AX
21806+1234:
21807+#endif
21808+
21809+#endif
21810+
21811+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21812 xor %eax,%eax
21813+ pax_force_retaddr
21814 ret
21815 CFI_ENDPROC
21816 ENDPROC(__get_user_2)
21817@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21818 ENTRY(__get_user_4)
21819 CFI_STARTPROC
21820 add $3,%_ASM_AX
21821+
21822+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21823 jc bad_get_user
21824 GET_THREAD_INFO(%_ASM_DX)
21825 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21826 jae bad_get_user
21827-3: mov -3(%_ASM_AX),%edx
21828+
21829+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21830+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21831+ cmp %_ASM_DX,%_ASM_AX
21832+ jae 1234f
21833+ add %_ASM_DX,%_ASM_AX
21834+1234:
21835+#endif
21836+
21837+#endif
21838+
21839+3: __copyuser_seg mov -3(%_ASM_AX),%edx
21840 xor %eax,%eax
21841+ pax_force_retaddr
21842 ret
21843 CFI_ENDPROC
21844 ENDPROC(__get_user_4)
21845@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21846 GET_THREAD_INFO(%_ASM_DX)
21847 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21848 jae bad_get_user
21849+
21850+#ifdef CONFIG_PAX_MEMORY_UDEREF
21851+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21852+ cmp %_ASM_DX,%_ASM_AX
21853+ jae 1234f
21854+ add %_ASM_DX,%_ASM_AX
21855+1234:
21856+#endif
21857+
21858 4: movq -7(%_ASM_AX),%_ASM_DX
21859 xor %eax,%eax
21860+ pax_force_retaddr
21861 ret
21862 CFI_ENDPROC
21863 ENDPROC(__get_user_8)
21864@@ -91,6 +152,7 @@ bad_get_user:
21865 CFI_STARTPROC
21866 xor %edx,%edx
21867 mov $(-EFAULT),%_ASM_AX
21868+ pax_force_retaddr
21869 ret
21870 CFI_ENDPROC
21871 END(bad_get_user)
21872diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21873index b1e6c4b..21ae8fc 100644
21874--- a/arch/x86/lib/insn.c
21875+++ b/arch/x86/lib/insn.c
21876@@ -21,6 +21,11 @@
21877 #include <linux/string.h>
21878 #include <asm/inat.h>
21879 #include <asm/insn.h>
21880+#ifdef __KERNEL__
21881+#include <asm/pgtable_types.h>
21882+#else
21883+#define ktla_ktva(addr) addr
21884+#endif
21885
21886 /* Verify next sizeof(t) bytes can be on the same instruction */
21887 #define validate_next(t, insn, n) \
21888@@ -49,8 +54,8 @@
21889 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21890 {
21891 memset(insn, 0, sizeof(*insn));
21892- insn->kaddr = kaddr;
21893- insn->next_byte = kaddr;
21894+ insn->kaddr = ktla_ktva(kaddr);
21895+ insn->next_byte = ktla_ktva(kaddr);
21896 insn->x86_64 = x86_64 ? 1 : 0;
21897 insn->opnd_bytes = 4;
21898 if (x86_64)
21899diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21900index 05a95e7..326f2fa 100644
21901--- a/arch/x86/lib/iomap_copy_64.S
21902+++ b/arch/x86/lib/iomap_copy_64.S
21903@@ -17,6 +17,7 @@
21904
21905 #include <linux/linkage.h>
21906 #include <asm/dwarf2.h>
21907+#include <asm/alternative-asm.h>
21908
21909 /*
21910 * override generic version in lib/iomap_copy.c
21911@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21912 CFI_STARTPROC
21913 movl %edx,%ecx
21914 rep movsd
21915+ pax_force_retaddr
21916 ret
21917 CFI_ENDPROC
21918 ENDPROC(__iowrite32_copy)
21919diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21920index 1c273be..da9cc0e 100644
21921--- a/arch/x86/lib/memcpy_64.S
21922+++ b/arch/x86/lib/memcpy_64.S
21923@@ -33,6 +33,7 @@
21924 rep movsq
21925 movl %edx, %ecx
21926 rep movsb
21927+ pax_force_retaddr
21928 ret
21929 .Lmemcpy_e:
21930 .previous
21931@@ -49,6 +50,7 @@
21932 movq %rdi, %rax
21933 movq %rdx, %rcx
21934 rep movsb
21935+ pax_force_retaddr
21936 ret
21937 .Lmemcpy_e_e:
21938 .previous
21939@@ -76,13 +78,13 @@ ENTRY(memcpy)
21940 */
21941 movq 0*8(%rsi), %r8
21942 movq 1*8(%rsi), %r9
21943- movq 2*8(%rsi), %r10
21944+ movq 2*8(%rsi), %rcx
21945 movq 3*8(%rsi), %r11
21946 leaq 4*8(%rsi), %rsi
21947
21948 movq %r8, 0*8(%rdi)
21949 movq %r9, 1*8(%rdi)
21950- movq %r10, 2*8(%rdi)
21951+ movq %rcx, 2*8(%rdi)
21952 movq %r11, 3*8(%rdi)
21953 leaq 4*8(%rdi), %rdi
21954 jae .Lcopy_forward_loop
21955@@ -105,12 +107,12 @@ ENTRY(memcpy)
21956 subq $0x20, %rdx
21957 movq -1*8(%rsi), %r8
21958 movq -2*8(%rsi), %r9
21959- movq -3*8(%rsi), %r10
21960+ movq -3*8(%rsi), %rcx
21961 movq -4*8(%rsi), %r11
21962 leaq -4*8(%rsi), %rsi
21963 movq %r8, -1*8(%rdi)
21964 movq %r9, -2*8(%rdi)
21965- movq %r10, -3*8(%rdi)
21966+ movq %rcx, -3*8(%rdi)
21967 movq %r11, -4*8(%rdi)
21968 leaq -4*8(%rdi), %rdi
21969 jae .Lcopy_backward_loop
21970@@ -130,12 +132,13 @@ ENTRY(memcpy)
21971 */
21972 movq 0*8(%rsi), %r8
21973 movq 1*8(%rsi), %r9
21974- movq -2*8(%rsi, %rdx), %r10
21975+ movq -2*8(%rsi, %rdx), %rcx
21976 movq -1*8(%rsi, %rdx), %r11
21977 movq %r8, 0*8(%rdi)
21978 movq %r9, 1*8(%rdi)
21979- movq %r10, -2*8(%rdi, %rdx)
21980+ movq %rcx, -2*8(%rdi, %rdx)
21981 movq %r11, -1*8(%rdi, %rdx)
21982+ pax_force_retaddr
21983 retq
21984 .p2align 4
21985 .Lless_16bytes:
21986@@ -148,6 +151,7 @@ ENTRY(memcpy)
21987 movq -1*8(%rsi, %rdx), %r9
21988 movq %r8, 0*8(%rdi)
21989 movq %r9, -1*8(%rdi, %rdx)
21990+ pax_force_retaddr
21991 retq
21992 .p2align 4
21993 .Lless_8bytes:
21994@@ -161,6 +165,7 @@ ENTRY(memcpy)
21995 movl -4(%rsi, %rdx), %r8d
21996 movl %ecx, (%rdi)
21997 movl %r8d, -4(%rdi, %rdx)
21998+ pax_force_retaddr
21999 retq
22000 .p2align 4
22001 .Lless_3bytes:
22002@@ -179,6 +184,7 @@ ENTRY(memcpy)
22003 movb %cl, (%rdi)
22004
22005 .Lend:
22006+ pax_force_retaddr
22007 retq
22008 CFI_ENDPROC
22009 ENDPROC(memcpy)
22010diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
22011index ee16461..c39c199 100644
22012--- a/arch/x86/lib/memmove_64.S
22013+++ b/arch/x86/lib/memmove_64.S
22014@@ -61,13 +61,13 @@ ENTRY(memmove)
22015 5:
22016 sub $0x20, %rdx
22017 movq 0*8(%rsi), %r11
22018- movq 1*8(%rsi), %r10
22019+ movq 1*8(%rsi), %rcx
22020 movq 2*8(%rsi), %r9
22021 movq 3*8(%rsi), %r8
22022 leaq 4*8(%rsi), %rsi
22023
22024 movq %r11, 0*8(%rdi)
22025- movq %r10, 1*8(%rdi)
22026+ movq %rcx, 1*8(%rdi)
22027 movq %r9, 2*8(%rdi)
22028 movq %r8, 3*8(%rdi)
22029 leaq 4*8(%rdi), %rdi
22030@@ -81,10 +81,10 @@ ENTRY(memmove)
22031 4:
22032 movq %rdx, %rcx
22033 movq -8(%rsi, %rdx), %r11
22034- lea -8(%rdi, %rdx), %r10
22035+ lea -8(%rdi, %rdx), %r9
22036 shrq $3, %rcx
22037 rep movsq
22038- movq %r11, (%r10)
22039+ movq %r11, (%r9)
22040 jmp 13f
22041 .Lmemmove_end_forward:
22042
22043@@ -95,14 +95,14 @@ ENTRY(memmove)
22044 7:
22045 movq %rdx, %rcx
22046 movq (%rsi), %r11
22047- movq %rdi, %r10
22048+ movq %rdi, %r9
22049 leaq -8(%rsi, %rdx), %rsi
22050 leaq -8(%rdi, %rdx), %rdi
22051 shrq $3, %rcx
22052 std
22053 rep movsq
22054 cld
22055- movq %r11, (%r10)
22056+ movq %r11, (%r9)
22057 jmp 13f
22058
22059 /*
22060@@ -127,13 +127,13 @@ ENTRY(memmove)
22061 8:
22062 subq $0x20, %rdx
22063 movq -1*8(%rsi), %r11
22064- movq -2*8(%rsi), %r10
22065+ movq -2*8(%rsi), %rcx
22066 movq -3*8(%rsi), %r9
22067 movq -4*8(%rsi), %r8
22068 leaq -4*8(%rsi), %rsi
22069
22070 movq %r11, -1*8(%rdi)
22071- movq %r10, -2*8(%rdi)
22072+ movq %rcx, -2*8(%rdi)
22073 movq %r9, -3*8(%rdi)
22074 movq %r8, -4*8(%rdi)
22075 leaq -4*8(%rdi), %rdi
22076@@ -151,11 +151,11 @@ ENTRY(memmove)
22077 * Move data from 16 bytes to 31 bytes.
22078 */
22079 movq 0*8(%rsi), %r11
22080- movq 1*8(%rsi), %r10
22081+ movq 1*8(%rsi), %rcx
22082 movq -2*8(%rsi, %rdx), %r9
22083 movq -1*8(%rsi, %rdx), %r8
22084 movq %r11, 0*8(%rdi)
22085- movq %r10, 1*8(%rdi)
22086+ movq %rcx, 1*8(%rdi)
22087 movq %r9, -2*8(%rdi, %rdx)
22088 movq %r8, -1*8(%rdi, %rdx)
22089 jmp 13f
22090@@ -167,9 +167,9 @@ ENTRY(memmove)
22091 * Move data from 8 bytes to 15 bytes.
22092 */
22093 movq 0*8(%rsi), %r11
22094- movq -1*8(%rsi, %rdx), %r10
22095+ movq -1*8(%rsi, %rdx), %r9
22096 movq %r11, 0*8(%rdi)
22097- movq %r10, -1*8(%rdi, %rdx)
22098+ movq %r9, -1*8(%rdi, %rdx)
22099 jmp 13f
22100 10:
22101 cmpq $4, %rdx
22102@@ -178,9 +178,9 @@ ENTRY(memmove)
22103 * Move data from 4 bytes to 7 bytes.
22104 */
22105 movl (%rsi), %r11d
22106- movl -4(%rsi, %rdx), %r10d
22107+ movl -4(%rsi, %rdx), %r9d
22108 movl %r11d, (%rdi)
22109- movl %r10d, -4(%rdi, %rdx)
22110+ movl %r9d, -4(%rdi, %rdx)
22111 jmp 13f
22112 11:
22113 cmp $2, %rdx
22114@@ -189,9 +189,9 @@ ENTRY(memmove)
22115 * Move data from 2 bytes to 3 bytes.
22116 */
22117 movw (%rsi), %r11w
22118- movw -2(%rsi, %rdx), %r10w
22119+ movw -2(%rsi, %rdx), %r9w
22120 movw %r11w, (%rdi)
22121- movw %r10w, -2(%rdi, %rdx)
22122+ movw %r9w, -2(%rdi, %rdx)
22123 jmp 13f
22124 12:
22125 cmp $1, %rdx
22126@@ -202,6 +202,7 @@ ENTRY(memmove)
22127 movb (%rsi), %r11b
22128 movb %r11b, (%rdi)
22129 13:
22130+ pax_force_retaddr
22131 retq
22132 CFI_ENDPROC
22133
22134@@ -210,6 +211,7 @@ ENTRY(memmove)
22135 /* Forward moving data. */
22136 movq %rdx, %rcx
22137 rep movsb
22138+ pax_force_retaddr
22139 retq
22140 .Lmemmove_end_forward_efs:
22141 .previous
22142diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22143index 2dcb380..963660a 100644
22144--- a/arch/x86/lib/memset_64.S
22145+++ b/arch/x86/lib/memset_64.S
22146@@ -30,6 +30,7 @@
22147 movl %edx,%ecx
22148 rep stosb
22149 movq %r9,%rax
22150+ pax_force_retaddr
22151 ret
22152 .Lmemset_e:
22153 .previous
22154@@ -52,6 +53,7 @@
22155 movq %rdx,%rcx
22156 rep stosb
22157 movq %r9,%rax
22158+ pax_force_retaddr
22159 ret
22160 .Lmemset_e_e:
22161 .previous
22162@@ -59,7 +61,7 @@
22163 ENTRY(memset)
22164 ENTRY(__memset)
22165 CFI_STARTPROC
22166- movq %rdi,%r10
22167+ movq %rdi,%r11
22168
22169 /* expand byte value */
22170 movzbl %sil,%ecx
22171@@ -117,7 +119,8 @@ ENTRY(__memset)
22172 jnz .Lloop_1
22173
22174 .Lende:
22175- movq %r10,%rax
22176+ movq %r11,%rax
22177+ pax_force_retaddr
22178 ret
22179
22180 CFI_RESTORE_STATE
22181diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22182index c9f2d9b..e7fd2c0 100644
22183--- a/arch/x86/lib/mmx_32.c
22184+++ b/arch/x86/lib/mmx_32.c
22185@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22186 {
22187 void *p;
22188 int i;
22189+ unsigned long cr0;
22190
22191 if (unlikely(in_interrupt()))
22192 return __memcpy(to, from, len);
22193@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22194 kernel_fpu_begin();
22195
22196 __asm__ __volatile__ (
22197- "1: prefetch (%0)\n" /* This set is 28 bytes */
22198- " prefetch 64(%0)\n"
22199- " prefetch 128(%0)\n"
22200- " prefetch 192(%0)\n"
22201- " prefetch 256(%0)\n"
22202+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22203+ " prefetch 64(%1)\n"
22204+ " prefetch 128(%1)\n"
22205+ " prefetch 192(%1)\n"
22206+ " prefetch 256(%1)\n"
22207 "2: \n"
22208 ".section .fixup, \"ax\"\n"
22209- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22210+ "3: \n"
22211+
22212+#ifdef CONFIG_PAX_KERNEXEC
22213+ " movl %%cr0, %0\n"
22214+ " movl %0, %%eax\n"
22215+ " andl $0xFFFEFFFF, %%eax\n"
22216+ " movl %%eax, %%cr0\n"
22217+#endif
22218+
22219+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22220+
22221+#ifdef CONFIG_PAX_KERNEXEC
22222+ " movl %0, %%cr0\n"
22223+#endif
22224+
22225 " jmp 2b\n"
22226 ".previous\n"
22227 _ASM_EXTABLE(1b, 3b)
22228- : : "r" (from));
22229+ : "=&r" (cr0) : "r" (from) : "ax");
22230
22231 for ( ; i > 5; i--) {
22232 __asm__ __volatile__ (
22233- "1: prefetch 320(%0)\n"
22234- "2: movq (%0), %%mm0\n"
22235- " movq 8(%0), %%mm1\n"
22236- " movq 16(%0), %%mm2\n"
22237- " movq 24(%0), %%mm3\n"
22238- " movq %%mm0, (%1)\n"
22239- " movq %%mm1, 8(%1)\n"
22240- " movq %%mm2, 16(%1)\n"
22241- " movq %%mm3, 24(%1)\n"
22242- " movq 32(%0), %%mm0\n"
22243- " movq 40(%0), %%mm1\n"
22244- " movq 48(%0), %%mm2\n"
22245- " movq 56(%0), %%mm3\n"
22246- " movq %%mm0, 32(%1)\n"
22247- " movq %%mm1, 40(%1)\n"
22248- " movq %%mm2, 48(%1)\n"
22249- " movq %%mm3, 56(%1)\n"
22250+ "1: prefetch 320(%1)\n"
22251+ "2: movq (%1), %%mm0\n"
22252+ " movq 8(%1), %%mm1\n"
22253+ " movq 16(%1), %%mm2\n"
22254+ " movq 24(%1), %%mm3\n"
22255+ " movq %%mm0, (%2)\n"
22256+ " movq %%mm1, 8(%2)\n"
22257+ " movq %%mm2, 16(%2)\n"
22258+ " movq %%mm3, 24(%2)\n"
22259+ " movq 32(%1), %%mm0\n"
22260+ " movq 40(%1), %%mm1\n"
22261+ " movq 48(%1), %%mm2\n"
22262+ " movq 56(%1), %%mm3\n"
22263+ " movq %%mm0, 32(%2)\n"
22264+ " movq %%mm1, 40(%2)\n"
22265+ " movq %%mm2, 48(%2)\n"
22266+ " movq %%mm3, 56(%2)\n"
22267 ".section .fixup, \"ax\"\n"
22268- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22269+ "3:\n"
22270+
22271+#ifdef CONFIG_PAX_KERNEXEC
22272+ " movl %%cr0, %0\n"
22273+ " movl %0, %%eax\n"
22274+ " andl $0xFFFEFFFF, %%eax\n"
22275+ " movl %%eax, %%cr0\n"
22276+#endif
22277+
22278+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22279+
22280+#ifdef CONFIG_PAX_KERNEXEC
22281+ " movl %0, %%cr0\n"
22282+#endif
22283+
22284 " jmp 2b\n"
22285 ".previous\n"
22286 _ASM_EXTABLE(1b, 3b)
22287- : : "r" (from), "r" (to) : "memory");
22288+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22289
22290 from += 64;
22291 to += 64;
22292@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22293 static void fast_copy_page(void *to, void *from)
22294 {
22295 int i;
22296+ unsigned long cr0;
22297
22298 kernel_fpu_begin();
22299
22300@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22301 * but that is for later. -AV
22302 */
22303 __asm__ __volatile__(
22304- "1: prefetch (%0)\n"
22305- " prefetch 64(%0)\n"
22306- " prefetch 128(%0)\n"
22307- " prefetch 192(%0)\n"
22308- " prefetch 256(%0)\n"
22309+ "1: prefetch (%1)\n"
22310+ " prefetch 64(%1)\n"
22311+ " prefetch 128(%1)\n"
22312+ " prefetch 192(%1)\n"
22313+ " prefetch 256(%1)\n"
22314 "2: \n"
22315 ".section .fixup, \"ax\"\n"
22316- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22317+ "3: \n"
22318+
22319+#ifdef CONFIG_PAX_KERNEXEC
22320+ " movl %%cr0, %0\n"
22321+ " movl %0, %%eax\n"
22322+ " andl $0xFFFEFFFF, %%eax\n"
22323+ " movl %%eax, %%cr0\n"
22324+#endif
22325+
22326+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22327+
22328+#ifdef CONFIG_PAX_KERNEXEC
22329+ " movl %0, %%cr0\n"
22330+#endif
22331+
22332 " jmp 2b\n"
22333 ".previous\n"
22334- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22335+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22336
22337 for (i = 0; i < (4096-320)/64; i++) {
22338 __asm__ __volatile__ (
22339- "1: prefetch 320(%0)\n"
22340- "2: movq (%0), %%mm0\n"
22341- " movntq %%mm0, (%1)\n"
22342- " movq 8(%0), %%mm1\n"
22343- " movntq %%mm1, 8(%1)\n"
22344- " movq 16(%0), %%mm2\n"
22345- " movntq %%mm2, 16(%1)\n"
22346- " movq 24(%0), %%mm3\n"
22347- " movntq %%mm3, 24(%1)\n"
22348- " movq 32(%0), %%mm4\n"
22349- " movntq %%mm4, 32(%1)\n"
22350- " movq 40(%0), %%mm5\n"
22351- " movntq %%mm5, 40(%1)\n"
22352- " movq 48(%0), %%mm6\n"
22353- " movntq %%mm6, 48(%1)\n"
22354- " movq 56(%0), %%mm7\n"
22355- " movntq %%mm7, 56(%1)\n"
22356+ "1: prefetch 320(%1)\n"
22357+ "2: movq (%1), %%mm0\n"
22358+ " movntq %%mm0, (%2)\n"
22359+ " movq 8(%1), %%mm1\n"
22360+ " movntq %%mm1, 8(%2)\n"
22361+ " movq 16(%1), %%mm2\n"
22362+ " movntq %%mm2, 16(%2)\n"
22363+ " movq 24(%1), %%mm3\n"
22364+ " movntq %%mm3, 24(%2)\n"
22365+ " movq 32(%1), %%mm4\n"
22366+ " movntq %%mm4, 32(%2)\n"
22367+ " movq 40(%1), %%mm5\n"
22368+ " movntq %%mm5, 40(%2)\n"
22369+ " movq 48(%1), %%mm6\n"
22370+ " movntq %%mm6, 48(%2)\n"
22371+ " movq 56(%1), %%mm7\n"
22372+ " movntq %%mm7, 56(%2)\n"
22373 ".section .fixup, \"ax\"\n"
22374- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22375+ "3:\n"
22376+
22377+#ifdef CONFIG_PAX_KERNEXEC
22378+ " movl %%cr0, %0\n"
22379+ " movl %0, %%eax\n"
22380+ " andl $0xFFFEFFFF, %%eax\n"
22381+ " movl %%eax, %%cr0\n"
22382+#endif
22383+
22384+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22385+
22386+#ifdef CONFIG_PAX_KERNEXEC
22387+ " movl %0, %%cr0\n"
22388+#endif
22389+
22390 " jmp 2b\n"
22391 ".previous\n"
22392- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22393+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22394
22395 from += 64;
22396 to += 64;
22397@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22398 static void fast_copy_page(void *to, void *from)
22399 {
22400 int i;
22401+ unsigned long cr0;
22402
22403 kernel_fpu_begin();
22404
22405 __asm__ __volatile__ (
22406- "1: prefetch (%0)\n"
22407- " prefetch 64(%0)\n"
22408- " prefetch 128(%0)\n"
22409- " prefetch 192(%0)\n"
22410- " prefetch 256(%0)\n"
22411+ "1: prefetch (%1)\n"
22412+ " prefetch 64(%1)\n"
22413+ " prefetch 128(%1)\n"
22414+ " prefetch 192(%1)\n"
22415+ " prefetch 256(%1)\n"
22416 "2: \n"
22417 ".section .fixup, \"ax\"\n"
22418- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22419+ "3: \n"
22420+
22421+#ifdef CONFIG_PAX_KERNEXEC
22422+ " movl %%cr0, %0\n"
22423+ " movl %0, %%eax\n"
22424+ " andl $0xFFFEFFFF, %%eax\n"
22425+ " movl %%eax, %%cr0\n"
22426+#endif
22427+
22428+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22429+
22430+#ifdef CONFIG_PAX_KERNEXEC
22431+ " movl %0, %%cr0\n"
22432+#endif
22433+
22434 " jmp 2b\n"
22435 ".previous\n"
22436- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22437+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22438
22439 for (i = 0; i < 4096/64; i++) {
22440 __asm__ __volatile__ (
22441- "1: prefetch 320(%0)\n"
22442- "2: movq (%0), %%mm0\n"
22443- " movq 8(%0), %%mm1\n"
22444- " movq 16(%0), %%mm2\n"
22445- " movq 24(%0), %%mm3\n"
22446- " movq %%mm0, (%1)\n"
22447- " movq %%mm1, 8(%1)\n"
22448- " movq %%mm2, 16(%1)\n"
22449- " movq %%mm3, 24(%1)\n"
22450- " movq 32(%0), %%mm0\n"
22451- " movq 40(%0), %%mm1\n"
22452- " movq 48(%0), %%mm2\n"
22453- " movq 56(%0), %%mm3\n"
22454- " movq %%mm0, 32(%1)\n"
22455- " movq %%mm1, 40(%1)\n"
22456- " movq %%mm2, 48(%1)\n"
22457- " movq %%mm3, 56(%1)\n"
22458+ "1: prefetch 320(%1)\n"
22459+ "2: movq (%1), %%mm0\n"
22460+ " movq 8(%1), %%mm1\n"
22461+ " movq 16(%1), %%mm2\n"
22462+ " movq 24(%1), %%mm3\n"
22463+ " movq %%mm0, (%2)\n"
22464+ " movq %%mm1, 8(%2)\n"
22465+ " movq %%mm2, 16(%2)\n"
22466+ " movq %%mm3, 24(%2)\n"
22467+ " movq 32(%1), %%mm0\n"
22468+ " movq 40(%1), %%mm1\n"
22469+ " movq 48(%1), %%mm2\n"
22470+ " movq 56(%1), %%mm3\n"
22471+ " movq %%mm0, 32(%2)\n"
22472+ " movq %%mm1, 40(%2)\n"
22473+ " movq %%mm2, 48(%2)\n"
22474+ " movq %%mm3, 56(%2)\n"
22475 ".section .fixup, \"ax\"\n"
22476- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22477+ "3:\n"
22478+
22479+#ifdef CONFIG_PAX_KERNEXEC
22480+ " movl %%cr0, %0\n"
22481+ " movl %0, %%eax\n"
22482+ " andl $0xFFFEFFFF, %%eax\n"
22483+ " movl %%eax, %%cr0\n"
22484+#endif
22485+
22486+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22487+
22488+#ifdef CONFIG_PAX_KERNEXEC
22489+ " movl %0, %%cr0\n"
22490+#endif
22491+
22492 " jmp 2b\n"
22493 ".previous\n"
22494 _ASM_EXTABLE(1b, 3b)
22495- : : "r" (from), "r" (to) : "memory");
22496+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22497
22498 from += 64;
22499 to += 64;
22500diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22501index 69fa106..adda88b 100644
22502--- a/arch/x86/lib/msr-reg.S
22503+++ b/arch/x86/lib/msr-reg.S
22504@@ -3,6 +3,7 @@
22505 #include <asm/dwarf2.h>
22506 #include <asm/asm.h>
22507 #include <asm/msr.h>
22508+#include <asm/alternative-asm.h>
22509
22510 #ifdef CONFIG_X86_64
22511 /*
22512@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22513 CFI_STARTPROC
22514 pushq_cfi %rbx
22515 pushq_cfi %rbp
22516- movq %rdi, %r10 /* Save pointer */
22517+ movq %rdi, %r9 /* Save pointer */
22518 xorl %r11d, %r11d /* Return value */
22519 movl (%rdi), %eax
22520 movl 4(%rdi), %ecx
22521@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22522 movl 28(%rdi), %edi
22523 CFI_REMEMBER_STATE
22524 1: \op
22525-2: movl %eax, (%r10)
22526+2: movl %eax, (%r9)
22527 movl %r11d, %eax /* Return value */
22528- movl %ecx, 4(%r10)
22529- movl %edx, 8(%r10)
22530- movl %ebx, 12(%r10)
22531- movl %ebp, 20(%r10)
22532- movl %esi, 24(%r10)
22533- movl %edi, 28(%r10)
22534+ movl %ecx, 4(%r9)
22535+ movl %edx, 8(%r9)
22536+ movl %ebx, 12(%r9)
22537+ movl %ebp, 20(%r9)
22538+ movl %esi, 24(%r9)
22539+ movl %edi, 28(%r9)
22540 popq_cfi %rbp
22541 popq_cfi %rbx
22542+ pax_force_retaddr
22543 ret
22544 3:
22545 CFI_RESTORE_STATE
22546diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22547index 7f951c8..ebd573a 100644
22548--- a/arch/x86/lib/putuser.S
22549+++ b/arch/x86/lib/putuser.S
22550@@ -15,7 +15,9 @@
22551 #include <asm/thread_info.h>
22552 #include <asm/errno.h>
22553 #include <asm/asm.h>
22554-
22555+#include <asm/segment.h>
22556+#include <asm/pgtable.h>
22557+#include <asm/alternative-asm.h>
22558
22559 /*
22560 * __put_user_X
22561@@ -29,52 +31,119 @@
22562 * as they get called from within inline assembly.
22563 */
22564
22565-#define ENTER CFI_STARTPROC ; \
22566- GET_THREAD_INFO(%_ASM_BX)
22567-#define EXIT ret ; \
22568+#define ENTER CFI_STARTPROC
22569+#define EXIT pax_force_retaddr; ret ; \
22570 CFI_ENDPROC
22571
22572+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22573+#define _DEST %_ASM_CX,%_ASM_BX
22574+#else
22575+#define _DEST %_ASM_CX
22576+#endif
22577+
22578+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22579+#define __copyuser_seg gs;
22580+#else
22581+#define __copyuser_seg
22582+#endif
22583+
22584 .text
22585 ENTRY(__put_user_1)
22586 ENTER
22587+
22588+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22589+ GET_THREAD_INFO(%_ASM_BX)
22590 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22591 jae bad_put_user
22592-1: movb %al,(%_ASM_CX)
22593+
22594+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22595+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22596+ cmp %_ASM_BX,%_ASM_CX
22597+ jb 1234f
22598+ xor %ebx,%ebx
22599+1234:
22600+#endif
22601+
22602+#endif
22603+
22604+1: __copyuser_seg movb %al,(_DEST)
22605 xor %eax,%eax
22606 EXIT
22607 ENDPROC(__put_user_1)
22608
22609 ENTRY(__put_user_2)
22610 ENTER
22611+
22612+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22613+ GET_THREAD_INFO(%_ASM_BX)
22614 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22615 sub $1,%_ASM_BX
22616 cmp %_ASM_BX,%_ASM_CX
22617 jae bad_put_user
22618-2: movw %ax,(%_ASM_CX)
22619+
22620+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22621+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22622+ cmp %_ASM_BX,%_ASM_CX
22623+ jb 1234f
22624+ xor %ebx,%ebx
22625+1234:
22626+#endif
22627+
22628+#endif
22629+
22630+2: __copyuser_seg movw %ax,(_DEST)
22631 xor %eax,%eax
22632 EXIT
22633 ENDPROC(__put_user_2)
22634
22635 ENTRY(__put_user_4)
22636 ENTER
22637+
22638+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22639+ GET_THREAD_INFO(%_ASM_BX)
22640 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22641 sub $3,%_ASM_BX
22642 cmp %_ASM_BX,%_ASM_CX
22643 jae bad_put_user
22644-3: movl %eax,(%_ASM_CX)
22645+
22646+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22647+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22648+ cmp %_ASM_BX,%_ASM_CX
22649+ jb 1234f
22650+ xor %ebx,%ebx
22651+1234:
22652+#endif
22653+
22654+#endif
22655+
22656+3: __copyuser_seg movl %eax,(_DEST)
22657 xor %eax,%eax
22658 EXIT
22659 ENDPROC(__put_user_4)
22660
22661 ENTRY(__put_user_8)
22662 ENTER
22663+
22664+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22665+ GET_THREAD_INFO(%_ASM_BX)
22666 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22667 sub $7,%_ASM_BX
22668 cmp %_ASM_BX,%_ASM_CX
22669 jae bad_put_user
22670-4: mov %_ASM_AX,(%_ASM_CX)
22671+
22672+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22673+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22674+ cmp %_ASM_BX,%_ASM_CX
22675+ jb 1234f
22676+ xor %ebx,%ebx
22677+1234:
22678+#endif
22679+
22680+#endif
22681+
22682+4: __copyuser_seg mov %_ASM_AX,(_DEST)
22683 #ifdef CONFIG_X86_32
22684-5: movl %edx,4(%_ASM_CX)
22685+5: __copyuser_seg movl %edx,4(_DEST)
22686 #endif
22687 xor %eax,%eax
22688 EXIT
22689diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22690index 1cad221..de671ee 100644
22691--- a/arch/x86/lib/rwlock.S
22692+++ b/arch/x86/lib/rwlock.S
22693@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22694 FRAME
22695 0: LOCK_PREFIX
22696 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22697+
22698+#ifdef CONFIG_PAX_REFCOUNT
22699+ jno 1234f
22700+ LOCK_PREFIX
22701+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22702+ int $4
22703+1234:
22704+ _ASM_EXTABLE(1234b, 1234b)
22705+#endif
22706+
22707 1: rep; nop
22708 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22709 jne 1b
22710 LOCK_PREFIX
22711 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22712+
22713+#ifdef CONFIG_PAX_REFCOUNT
22714+ jno 1234f
22715+ LOCK_PREFIX
22716+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22717+ int $4
22718+1234:
22719+ _ASM_EXTABLE(1234b, 1234b)
22720+#endif
22721+
22722 jnz 0b
22723 ENDFRAME
22724+ pax_force_retaddr
22725 ret
22726 CFI_ENDPROC
22727 END(__write_lock_failed)
22728@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22729 FRAME
22730 0: LOCK_PREFIX
22731 READ_LOCK_SIZE(inc) (%__lock_ptr)
22732+
22733+#ifdef CONFIG_PAX_REFCOUNT
22734+ jno 1234f
22735+ LOCK_PREFIX
22736+ READ_LOCK_SIZE(dec) (%__lock_ptr)
22737+ int $4
22738+1234:
22739+ _ASM_EXTABLE(1234b, 1234b)
22740+#endif
22741+
22742 1: rep; nop
22743 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22744 js 1b
22745 LOCK_PREFIX
22746 READ_LOCK_SIZE(dec) (%__lock_ptr)
22747+
22748+#ifdef CONFIG_PAX_REFCOUNT
22749+ jno 1234f
22750+ LOCK_PREFIX
22751+ READ_LOCK_SIZE(inc) (%__lock_ptr)
22752+ int $4
22753+1234:
22754+ _ASM_EXTABLE(1234b, 1234b)
22755+#endif
22756+
22757 js 0b
22758 ENDFRAME
22759+ pax_force_retaddr
22760 ret
22761 CFI_ENDPROC
22762 END(__read_lock_failed)
22763diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22764index 5dff5f0..cadebf4 100644
22765--- a/arch/x86/lib/rwsem.S
22766+++ b/arch/x86/lib/rwsem.S
22767@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22768 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22769 CFI_RESTORE __ASM_REG(dx)
22770 restore_common_regs
22771+ pax_force_retaddr
22772 ret
22773 CFI_ENDPROC
22774 ENDPROC(call_rwsem_down_read_failed)
22775@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22776 movq %rax,%rdi
22777 call rwsem_down_write_failed
22778 restore_common_regs
22779+ pax_force_retaddr
22780 ret
22781 CFI_ENDPROC
22782 ENDPROC(call_rwsem_down_write_failed)
22783@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22784 movq %rax,%rdi
22785 call rwsem_wake
22786 restore_common_regs
22787-1: ret
22788+1: pax_force_retaddr
22789+ ret
22790 CFI_ENDPROC
22791 ENDPROC(call_rwsem_wake)
22792
22793@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22794 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22795 CFI_RESTORE __ASM_REG(dx)
22796 restore_common_regs
22797+ pax_force_retaddr
22798 ret
22799 CFI_ENDPROC
22800 ENDPROC(call_rwsem_downgrade_wake)
22801diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22802index a63efd6..ccecad8 100644
22803--- a/arch/x86/lib/thunk_64.S
22804+++ b/arch/x86/lib/thunk_64.S
22805@@ -8,6 +8,7 @@
22806 #include <linux/linkage.h>
22807 #include <asm/dwarf2.h>
22808 #include <asm/calling.h>
22809+#include <asm/alternative-asm.h>
22810
22811 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22812 .macro THUNK name, func, put_ret_addr_in_rdi=0
22813@@ -41,5 +42,6 @@
22814 SAVE_ARGS
22815 restore:
22816 RESTORE_ARGS
22817+ pax_force_retaddr
22818 ret
22819 CFI_ENDPROC
22820diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22821index 1781b2f..90368dd 100644
22822--- a/arch/x86/lib/usercopy_32.c
22823+++ b/arch/x86/lib/usercopy_32.c
22824@@ -42,10 +42,12 @@ do { \
22825 int __d0; \
22826 might_fault(); \
22827 __asm__ __volatile__( \
22828+ __COPYUSER_SET_ES \
22829 "0: rep; stosl\n" \
22830 " movl %2,%0\n" \
22831 "1: rep; stosb\n" \
22832 "2:\n" \
22833+ __COPYUSER_RESTORE_ES \
22834 ".section .fixup,\"ax\"\n" \
22835 "3: lea 0(%2,%0,4),%0\n" \
22836 " jmp 2b\n" \
22837@@ -97,7 +99,7 @@ EXPORT_SYMBOL(__clear_user);
22838
22839 #ifdef CONFIG_X86_INTEL_USERCOPY
22840 static unsigned long
22841-__copy_user_intel(void __user *to, const void *from, unsigned long size)
22842+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22843 {
22844 int d0, d1;
22845 __asm__ __volatile__(
22846@@ -109,36 +111,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22847 " .align 2,0x90\n"
22848 "3: movl 0(%4), %%eax\n"
22849 "4: movl 4(%4), %%edx\n"
22850- "5: movl %%eax, 0(%3)\n"
22851- "6: movl %%edx, 4(%3)\n"
22852+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22853+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22854 "7: movl 8(%4), %%eax\n"
22855 "8: movl 12(%4),%%edx\n"
22856- "9: movl %%eax, 8(%3)\n"
22857- "10: movl %%edx, 12(%3)\n"
22858+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22859+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22860 "11: movl 16(%4), %%eax\n"
22861 "12: movl 20(%4), %%edx\n"
22862- "13: movl %%eax, 16(%3)\n"
22863- "14: movl %%edx, 20(%3)\n"
22864+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22865+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22866 "15: movl 24(%4), %%eax\n"
22867 "16: movl 28(%4), %%edx\n"
22868- "17: movl %%eax, 24(%3)\n"
22869- "18: movl %%edx, 28(%3)\n"
22870+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22871+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22872 "19: movl 32(%4), %%eax\n"
22873 "20: movl 36(%4), %%edx\n"
22874- "21: movl %%eax, 32(%3)\n"
22875- "22: movl %%edx, 36(%3)\n"
22876+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22877+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22878 "23: movl 40(%4), %%eax\n"
22879 "24: movl 44(%4), %%edx\n"
22880- "25: movl %%eax, 40(%3)\n"
22881- "26: movl %%edx, 44(%3)\n"
22882+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22883+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22884 "27: movl 48(%4), %%eax\n"
22885 "28: movl 52(%4), %%edx\n"
22886- "29: movl %%eax, 48(%3)\n"
22887- "30: movl %%edx, 52(%3)\n"
22888+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22889+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22890 "31: movl 56(%4), %%eax\n"
22891 "32: movl 60(%4), %%edx\n"
22892- "33: movl %%eax, 56(%3)\n"
22893- "34: movl %%edx, 60(%3)\n"
22894+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22895+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22896 " addl $-64, %0\n"
22897 " addl $64, %4\n"
22898 " addl $64, %3\n"
22899@@ -148,10 +150,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22900 " shrl $2, %0\n"
22901 " andl $3, %%eax\n"
22902 " cld\n"
22903+ __COPYUSER_SET_ES
22904 "99: rep; movsl\n"
22905 "36: movl %%eax, %0\n"
22906 "37: rep; movsb\n"
22907 "100:\n"
22908+ __COPYUSER_RESTORE_ES
22909 ".section .fixup,\"ax\"\n"
22910 "101: lea 0(%%eax,%0,4),%0\n"
22911 " jmp 100b\n"
22912@@ -201,46 +205,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22913 }
22914
22915 static unsigned long
22916+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22917+{
22918+ int d0, d1;
22919+ __asm__ __volatile__(
22920+ " .align 2,0x90\n"
22921+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22922+ " cmpl $67, %0\n"
22923+ " jbe 3f\n"
22924+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22925+ " .align 2,0x90\n"
22926+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22927+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22928+ "5: movl %%eax, 0(%3)\n"
22929+ "6: movl %%edx, 4(%3)\n"
22930+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22931+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22932+ "9: movl %%eax, 8(%3)\n"
22933+ "10: movl %%edx, 12(%3)\n"
22934+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22935+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
22936+ "13: movl %%eax, 16(%3)\n"
22937+ "14: movl %%edx, 20(%3)\n"
22938+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
22939+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
22940+ "17: movl %%eax, 24(%3)\n"
22941+ "18: movl %%edx, 28(%3)\n"
22942+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
22943+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
22944+ "21: movl %%eax, 32(%3)\n"
22945+ "22: movl %%edx, 36(%3)\n"
22946+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
22947+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
22948+ "25: movl %%eax, 40(%3)\n"
22949+ "26: movl %%edx, 44(%3)\n"
22950+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
22951+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
22952+ "29: movl %%eax, 48(%3)\n"
22953+ "30: movl %%edx, 52(%3)\n"
22954+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
22955+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
22956+ "33: movl %%eax, 56(%3)\n"
22957+ "34: movl %%edx, 60(%3)\n"
22958+ " addl $-64, %0\n"
22959+ " addl $64, %4\n"
22960+ " addl $64, %3\n"
22961+ " cmpl $63, %0\n"
22962+ " ja 1b\n"
22963+ "35: movl %0, %%eax\n"
22964+ " shrl $2, %0\n"
22965+ " andl $3, %%eax\n"
22966+ " cld\n"
22967+ "99: rep; "__copyuser_seg" movsl\n"
22968+ "36: movl %%eax, %0\n"
22969+ "37: rep; "__copyuser_seg" movsb\n"
22970+ "100:\n"
22971+ ".section .fixup,\"ax\"\n"
22972+ "101: lea 0(%%eax,%0,4),%0\n"
22973+ " jmp 100b\n"
22974+ ".previous\n"
22975+ _ASM_EXTABLE(1b,100b)
22976+ _ASM_EXTABLE(2b,100b)
22977+ _ASM_EXTABLE(3b,100b)
22978+ _ASM_EXTABLE(4b,100b)
22979+ _ASM_EXTABLE(5b,100b)
22980+ _ASM_EXTABLE(6b,100b)
22981+ _ASM_EXTABLE(7b,100b)
22982+ _ASM_EXTABLE(8b,100b)
22983+ _ASM_EXTABLE(9b,100b)
22984+ _ASM_EXTABLE(10b,100b)
22985+ _ASM_EXTABLE(11b,100b)
22986+ _ASM_EXTABLE(12b,100b)
22987+ _ASM_EXTABLE(13b,100b)
22988+ _ASM_EXTABLE(14b,100b)
22989+ _ASM_EXTABLE(15b,100b)
22990+ _ASM_EXTABLE(16b,100b)
22991+ _ASM_EXTABLE(17b,100b)
22992+ _ASM_EXTABLE(18b,100b)
22993+ _ASM_EXTABLE(19b,100b)
22994+ _ASM_EXTABLE(20b,100b)
22995+ _ASM_EXTABLE(21b,100b)
22996+ _ASM_EXTABLE(22b,100b)
22997+ _ASM_EXTABLE(23b,100b)
22998+ _ASM_EXTABLE(24b,100b)
22999+ _ASM_EXTABLE(25b,100b)
23000+ _ASM_EXTABLE(26b,100b)
23001+ _ASM_EXTABLE(27b,100b)
23002+ _ASM_EXTABLE(28b,100b)
23003+ _ASM_EXTABLE(29b,100b)
23004+ _ASM_EXTABLE(30b,100b)
23005+ _ASM_EXTABLE(31b,100b)
23006+ _ASM_EXTABLE(32b,100b)
23007+ _ASM_EXTABLE(33b,100b)
23008+ _ASM_EXTABLE(34b,100b)
23009+ _ASM_EXTABLE(35b,100b)
23010+ _ASM_EXTABLE(36b,100b)
23011+ _ASM_EXTABLE(37b,100b)
23012+ _ASM_EXTABLE(99b,101b)
23013+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23014+ : "1"(to), "2"(from), "0"(size)
23015+ : "eax", "edx", "memory");
23016+ return size;
23017+}
23018+
23019+static unsigned long __size_overflow(3)
23020 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23021 {
23022 int d0, d1;
23023 __asm__ __volatile__(
23024 " .align 2,0x90\n"
23025- "0: movl 32(%4), %%eax\n"
23026+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23027 " cmpl $67, %0\n"
23028 " jbe 2f\n"
23029- "1: movl 64(%4), %%eax\n"
23030+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23031 " .align 2,0x90\n"
23032- "2: movl 0(%4), %%eax\n"
23033- "21: movl 4(%4), %%edx\n"
23034+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23035+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23036 " movl %%eax, 0(%3)\n"
23037 " movl %%edx, 4(%3)\n"
23038- "3: movl 8(%4), %%eax\n"
23039- "31: movl 12(%4),%%edx\n"
23040+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23041+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23042 " movl %%eax, 8(%3)\n"
23043 " movl %%edx, 12(%3)\n"
23044- "4: movl 16(%4), %%eax\n"
23045- "41: movl 20(%4), %%edx\n"
23046+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23047+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23048 " movl %%eax, 16(%3)\n"
23049 " movl %%edx, 20(%3)\n"
23050- "10: movl 24(%4), %%eax\n"
23051- "51: movl 28(%4), %%edx\n"
23052+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23053+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23054 " movl %%eax, 24(%3)\n"
23055 " movl %%edx, 28(%3)\n"
23056- "11: movl 32(%4), %%eax\n"
23057- "61: movl 36(%4), %%edx\n"
23058+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23059+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23060 " movl %%eax, 32(%3)\n"
23061 " movl %%edx, 36(%3)\n"
23062- "12: movl 40(%4), %%eax\n"
23063- "71: movl 44(%4), %%edx\n"
23064+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23065+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23066 " movl %%eax, 40(%3)\n"
23067 " movl %%edx, 44(%3)\n"
23068- "13: movl 48(%4), %%eax\n"
23069- "81: movl 52(%4), %%edx\n"
23070+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23071+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23072 " movl %%eax, 48(%3)\n"
23073 " movl %%edx, 52(%3)\n"
23074- "14: movl 56(%4), %%eax\n"
23075- "91: movl 60(%4), %%edx\n"
23076+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23077+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23078 " movl %%eax, 56(%3)\n"
23079 " movl %%edx, 60(%3)\n"
23080 " addl $-64, %0\n"
23081@@ -252,9 +360,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23082 " shrl $2, %0\n"
23083 " andl $3, %%eax\n"
23084 " cld\n"
23085- "6: rep; movsl\n"
23086+ "6: rep; "__copyuser_seg" movsl\n"
23087 " movl %%eax,%0\n"
23088- "7: rep; movsb\n"
23089+ "7: rep; "__copyuser_seg" movsb\n"
23090 "8:\n"
23091 ".section .fixup,\"ax\"\n"
23092 "9: lea 0(%%eax,%0,4),%0\n"
23093@@ -297,48 +405,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23094 * hyoshiok@miraclelinux.com
23095 */
23096
23097-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23098+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
23099 const void __user *from, unsigned long size)
23100 {
23101 int d0, d1;
23102
23103 __asm__ __volatile__(
23104 " .align 2,0x90\n"
23105- "0: movl 32(%4), %%eax\n"
23106+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23107 " cmpl $67, %0\n"
23108 " jbe 2f\n"
23109- "1: movl 64(%4), %%eax\n"
23110+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23111 " .align 2,0x90\n"
23112- "2: movl 0(%4), %%eax\n"
23113- "21: movl 4(%4), %%edx\n"
23114+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23115+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23116 " movnti %%eax, 0(%3)\n"
23117 " movnti %%edx, 4(%3)\n"
23118- "3: movl 8(%4), %%eax\n"
23119- "31: movl 12(%4),%%edx\n"
23120+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23121+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23122 " movnti %%eax, 8(%3)\n"
23123 " movnti %%edx, 12(%3)\n"
23124- "4: movl 16(%4), %%eax\n"
23125- "41: movl 20(%4), %%edx\n"
23126+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23127+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23128 " movnti %%eax, 16(%3)\n"
23129 " movnti %%edx, 20(%3)\n"
23130- "10: movl 24(%4), %%eax\n"
23131- "51: movl 28(%4), %%edx\n"
23132+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23133+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23134 " movnti %%eax, 24(%3)\n"
23135 " movnti %%edx, 28(%3)\n"
23136- "11: movl 32(%4), %%eax\n"
23137- "61: movl 36(%4), %%edx\n"
23138+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23139+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23140 " movnti %%eax, 32(%3)\n"
23141 " movnti %%edx, 36(%3)\n"
23142- "12: movl 40(%4), %%eax\n"
23143- "71: movl 44(%4), %%edx\n"
23144+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23145+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23146 " movnti %%eax, 40(%3)\n"
23147 " movnti %%edx, 44(%3)\n"
23148- "13: movl 48(%4), %%eax\n"
23149- "81: movl 52(%4), %%edx\n"
23150+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23151+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23152 " movnti %%eax, 48(%3)\n"
23153 " movnti %%edx, 52(%3)\n"
23154- "14: movl 56(%4), %%eax\n"
23155- "91: movl 60(%4), %%edx\n"
23156+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23157+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23158 " movnti %%eax, 56(%3)\n"
23159 " movnti %%edx, 60(%3)\n"
23160 " addl $-64, %0\n"
23161@@ -351,9 +459,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23162 " shrl $2, %0\n"
23163 " andl $3, %%eax\n"
23164 " cld\n"
23165- "6: rep; movsl\n"
23166+ "6: rep; "__copyuser_seg" movsl\n"
23167 " movl %%eax,%0\n"
23168- "7: rep; movsb\n"
23169+ "7: rep; "__copyuser_seg" movsb\n"
23170 "8:\n"
23171 ".section .fixup,\"ax\"\n"
23172 "9: lea 0(%%eax,%0,4),%0\n"
23173@@ -391,48 +499,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23174 return size;
23175 }
23176
23177-static unsigned long __copy_user_intel_nocache(void *to,
23178+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
23179 const void __user *from, unsigned long size)
23180 {
23181 int d0, d1;
23182
23183 __asm__ __volatile__(
23184 " .align 2,0x90\n"
23185- "0: movl 32(%4), %%eax\n"
23186+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23187 " cmpl $67, %0\n"
23188 " jbe 2f\n"
23189- "1: movl 64(%4), %%eax\n"
23190+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23191 " .align 2,0x90\n"
23192- "2: movl 0(%4), %%eax\n"
23193- "21: movl 4(%4), %%edx\n"
23194+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23195+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23196 " movnti %%eax, 0(%3)\n"
23197 " movnti %%edx, 4(%3)\n"
23198- "3: movl 8(%4), %%eax\n"
23199- "31: movl 12(%4),%%edx\n"
23200+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23201+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23202 " movnti %%eax, 8(%3)\n"
23203 " movnti %%edx, 12(%3)\n"
23204- "4: movl 16(%4), %%eax\n"
23205- "41: movl 20(%4), %%edx\n"
23206+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23207+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23208 " movnti %%eax, 16(%3)\n"
23209 " movnti %%edx, 20(%3)\n"
23210- "10: movl 24(%4), %%eax\n"
23211- "51: movl 28(%4), %%edx\n"
23212+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23213+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23214 " movnti %%eax, 24(%3)\n"
23215 " movnti %%edx, 28(%3)\n"
23216- "11: movl 32(%4), %%eax\n"
23217- "61: movl 36(%4), %%edx\n"
23218+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23219+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23220 " movnti %%eax, 32(%3)\n"
23221 " movnti %%edx, 36(%3)\n"
23222- "12: movl 40(%4), %%eax\n"
23223- "71: movl 44(%4), %%edx\n"
23224+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23225+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23226 " movnti %%eax, 40(%3)\n"
23227 " movnti %%edx, 44(%3)\n"
23228- "13: movl 48(%4), %%eax\n"
23229- "81: movl 52(%4), %%edx\n"
23230+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23231+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23232 " movnti %%eax, 48(%3)\n"
23233 " movnti %%edx, 52(%3)\n"
23234- "14: movl 56(%4), %%eax\n"
23235- "91: movl 60(%4), %%edx\n"
23236+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23237+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23238 " movnti %%eax, 56(%3)\n"
23239 " movnti %%edx, 60(%3)\n"
23240 " addl $-64, %0\n"
23241@@ -445,9 +553,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23242 " shrl $2, %0\n"
23243 " andl $3, %%eax\n"
23244 " cld\n"
23245- "6: rep; movsl\n"
23246+ "6: rep; "__copyuser_seg" movsl\n"
23247 " movl %%eax,%0\n"
23248- "7: rep; movsb\n"
23249+ "7: rep; "__copyuser_seg" movsb\n"
23250 "8:\n"
23251 ".section .fixup,\"ax\"\n"
23252 "9: lea 0(%%eax,%0,4),%0\n"
23253@@ -487,32 +595,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23254 */
23255 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23256 unsigned long size);
23257-unsigned long __copy_user_intel(void __user *to, const void *from,
23258+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23259+ unsigned long size);
23260+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23261 unsigned long size);
23262 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23263 const void __user *from, unsigned long size);
23264 #endif /* CONFIG_X86_INTEL_USERCOPY */
23265
23266 /* Generic arbitrary sized copy. */
23267-#define __copy_user(to, from, size) \
23268+#define __copy_user(to, from, size, prefix, set, restore) \
23269 do { \
23270 int __d0, __d1, __d2; \
23271 __asm__ __volatile__( \
23272+ set \
23273 " cmp $7,%0\n" \
23274 " jbe 1f\n" \
23275 " movl %1,%0\n" \
23276 " negl %0\n" \
23277 " andl $7,%0\n" \
23278 " subl %0,%3\n" \
23279- "4: rep; movsb\n" \
23280+ "4: rep; "prefix"movsb\n" \
23281 " movl %3,%0\n" \
23282 " shrl $2,%0\n" \
23283 " andl $3,%3\n" \
23284 " .align 2,0x90\n" \
23285- "0: rep; movsl\n" \
23286+ "0: rep; "prefix"movsl\n" \
23287 " movl %3,%0\n" \
23288- "1: rep; movsb\n" \
23289+ "1: rep; "prefix"movsb\n" \
23290 "2:\n" \
23291+ restore \
23292 ".section .fixup,\"ax\"\n" \
23293 "5: addl %3,%0\n" \
23294 " jmp 2b\n" \
23295@@ -537,14 +649,14 @@ do { \
23296 " negl %0\n" \
23297 " andl $7,%0\n" \
23298 " subl %0,%3\n" \
23299- "4: rep; movsb\n" \
23300+ "4: rep; "__copyuser_seg"movsb\n" \
23301 " movl %3,%0\n" \
23302 " shrl $2,%0\n" \
23303 " andl $3,%3\n" \
23304 " .align 2,0x90\n" \
23305- "0: rep; movsl\n" \
23306+ "0: rep; "__copyuser_seg"movsl\n" \
23307 " movl %3,%0\n" \
23308- "1: rep; movsb\n" \
23309+ "1: rep; "__copyuser_seg"movsb\n" \
23310 "2:\n" \
23311 ".section .fixup,\"ax\"\n" \
23312 "5: addl %3,%0\n" \
23313@@ -627,9 +739,9 @@ survive:
23314 }
23315 #endif
23316 if (movsl_is_ok(to, from, n))
23317- __copy_user(to, from, n);
23318+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23319 else
23320- n = __copy_user_intel(to, from, n);
23321+ n = __generic_copy_to_user_intel(to, from, n);
23322 return n;
23323 }
23324 EXPORT_SYMBOL(__copy_to_user_ll);
23325@@ -649,10 +761,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23326 unsigned long n)
23327 {
23328 if (movsl_is_ok(to, from, n))
23329- __copy_user(to, from, n);
23330+ __copy_user(to, from, n, __copyuser_seg, "", "");
23331 else
23332- n = __copy_user_intel((void __user *)to,
23333- (const void *)from, n);
23334+ n = __generic_copy_from_user_intel(to, from, n);
23335 return n;
23336 }
23337 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23338@@ -679,65 +790,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23339 if (n > 64 && cpu_has_xmm2)
23340 n = __copy_user_intel_nocache(to, from, n);
23341 else
23342- __copy_user(to, from, n);
23343+ __copy_user(to, from, n, __copyuser_seg, "", "");
23344 #else
23345- __copy_user(to, from, n);
23346+ __copy_user(to, from, n, __copyuser_seg, "", "");
23347 #endif
23348 return n;
23349 }
23350 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23351
23352-/**
23353- * copy_to_user: - Copy a block of data into user space.
23354- * @to: Destination address, in user space.
23355- * @from: Source address, in kernel space.
23356- * @n: Number of bytes to copy.
23357- *
23358- * Context: User context only. This function may sleep.
23359- *
23360- * Copy data from kernel space to user space.
23361- *
23362- * Returns number of bytes that could not be copied.
23363- * On success, this will be zero.
23364- */
23365-unsigned long
23366-copy_to_user(void __user *to, const void *from, unsigned long n)
23367-{
23368- if (access_ok(VERIFY_WRITE, to, n))
23369- n = __copy_to_user(to, from, n);
23370- return n;
23371-}
23372-EXPORT_SYMBOL(copy_to_user);
23373-
23374-/**
23375- * copy_from_user: - Copy a block of data from user space.
23376- * @to: Destination address, in kernel space.
23377- * @from: Source address, in user space.
23378- * @n: Number of bytes to copy.
23379- *
23380- * Context: User context only. This function may sleep.
23381- *
23382- * Copy data from user space to kernel space.
23383- *
23384- * Returns number of bytes that could not be copied.
23385- * On success, this will be zero.
23386- *
23387- * If some data could not be copied, this function will pad the copied
23388- * data to the requested size using zero bytes.
23389- */
23390-unsigned long
23391-_copy_from_user(void *to, const void __user *from, unsigned long n)
23392-{
23393- if (access_ok(VERIFY_READ, from, n))
23394- n = __copy_from_user(to, from, n);
23395- else
23396- memset(to, 0, n);
23397- return n;
23398-}
23399-EXPORT_SYMBOL(_copy_from_user);
23400-
23401 void copy_from_user_overflow(void)
23402 {
23403 WARN(1, "Buffer overflow detected!\n");
23404 }
23405 EXPORT_SYMBOL(copy_from_user_overflow);
23406+
23407+void copy_to_user_overflow(void)
23408+{
23409+ WARN(1, "Buffer overflow detected!\n");
23410+}
23411+EXPORT_SYMBOL(copy_to_user_overflow);
23412+
23413+#ifdef CONFIG_PAX_MEMORY_UDEREF
23414+void __set_fs(mm_segment_t x)
23415+{
23416+ switch (x.seg) {
23417+ case 0:
23418+ loadsegment(gs, 0);
23419+ break;
23420+ case TASK_SIZE_MAX:
23421+ loadsegment(gs, __USER_DS);
23422+ break;
23423+ case -1UL:
23424+ loadsegment(gs, __KERNEL_DS);
23425+ break;
23426+ default:
23427+ BUG();
23428+ }
23429+ return;
23430+}
23431+EXPORT_SYMBOL(__set_fs);
23432+
23433+void set_fs(mm_segment_t x)
23434+{
23435+ current_thread_info()->addr_limit = x;
23436+ __set_fs(x);
23437+}
23438+EXPORT_SYMBOL(set_fs);
23439+#endif
23440diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23441index e5b130b..6690d31 100644
23442--- a/arch/x86/lib/usercopy_64.c
23443+++ b/arch/x86/lib/usercopy_64.c
23444@@ -16,6 +16,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23445 {
23446 long __d0;
23447 might_fault();
23448+
23449+#ifdef CONFIG_PAX_MEMORY_UDEREF
23450+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23451+ addr += PAX_USER_SHADOW_BASE;
23452+#endif
23453+
23454 /* no memory constraint because it doesn't change any memory gcc knows
23455 about */
23456 asm volatile(
23457@@ -52,12 +58,20 @@ unsigned long clear_user(void __user *to, unsigned long n)
23458 }
23459 EXPORT_SYMBOL(clear_user);
23460
23461-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23462+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23463 {
23464- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23465- return copy_user_generic((__force void *)to, (__force void *)from, len);
23466- }
23467- return len;
23468+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23469+
23470+#ifdef CONFIG_PAX_MEMORY_UDEREF
23471+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23472+ to += PAX_USER_SHADOW_BASE;
23473+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23474+ from += PAX_USER_SHADOW_BASE;
23475+#endif
23476+
23477+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23478+ }
23479+ return len;
23480 }
23481 EXPORT_SYMBOL(copy_in_user);
23482
23483@@ -67,7 +81,7 @@ EXPORT_SYMBOL(copy_in_user);
23484 * it is not necessary to optimize tail handling.
23485 */
23486 unsigned long
23487-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23488+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23489 {
23490 char c;
23491 unsigned zero_len;
23492@@ -84,3 +98,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23493 break;
23494 return len;
23495 }
23496+
23497+void copy_from_user_overflow(void)
23498+{
23499+ WARN(1, "Buffer overflow detected!\n");
23500+}
23501+EXPORT_SYMBOL(copy_from_user_overflow);
23502+
23503+void copy_to_user_overflow(void)
23504+{
23505+ WARN(1, "Buffer overflow detected!\n");
23506+}
23507+EXPORT_SYMBOL(copy_to_user_overflow);
23508diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23509index 903ec1e..c4166b2 100644
23510--- a/arch/x86/mm/extable.c
23511+++ b/arch/x86/mm/extable.c
23512@@ -6,12 +6,24 @@
23513 static inline unsigned long
23514 ex_insn_addr(const struct exception_table_entry *x)
23515 {
23516- return (unsigned long)&x->insn + x->insn;
23517+ unsigned long reloc = 0;
23518+
23519+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23520+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
23521+#endif
23522+
23523+ return (unsigned long)&x->insn + x->insn + reloc;
23524 }
23525 static inline unsigned long
23526 ex_fixup_addr(const struct exception_table_entry *x)
23527 {
23528- return (unsigned long)&x->fixup + x->fixup;
23529+ unsigned long reloc = 0;
23530+
23531+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23532+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
23533+#endif
23534+
23535+ return (unsigned long)&x->fixup + x->fixup + reloc;
23536 }
23537
23538 int fixup_exception(struct pt_regs *regs)
23539@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
23540 unsigned long new_ip;
23541
23542 #ifdef CONFIG_PNPBIOS
23543- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23544+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23545 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23546 extern u32 pnp_bios_is_utter_crap;
23547 pnp_bios_is_utter_crap = 1;
23548@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
23549 i += 4;
23550 p->fixup -= i;
23551 i += 4;
23552+
23553+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23554+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
23555+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
23556+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
23557+#endif
23558+
23559 }
23560 }
23561
23562diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23563index 76dcd9d..e9dffde 100644
23564--- a/arch/x86/mm/fault.c
23565+++ b/arch/x86/mm/fault.c
23566@@ -13,11 +13,18 @@
23567 #include <linux/perf_event.h> /* perf_sw_event */
23568 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23569 #include <linux/prefetch.h> /* prefetchw */
23570+#include <linux/unistd.h>
23571+#include <linux/compiler.h>
23572
23573 #include <asm/traps.h> /* dotraplinkage, ... */
23574 #include <asm/pgalloc.h> /* pgd_*(), ... */
23575 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23576 #include <asm/fixmap.h> /* VSYSCALL_START */
23577+#include <asm/tlbflush.h>
23578+
23579+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23580+#include <asm/stacktrace.h>
23581+#endif
23582
23583 /*
23584 * Page fault error code bits:
23585@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23586 int ret = 0;
23587
23588 /* kprobe_running() needs smp_processor_id() */
23589- if (kprobes_built_in() && !user_mode_vm(regs)) {
23590+ if (kprobes_built_in() && !user_mode(regs)) {
23591 preempt_disable();
23592 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23593 ret = 1;
23594@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23595 return !instr_lo || (instr_lo>>1) == 1;
23596 case 0x00:
23597 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23598- if (probe_kernel_address(instr, opcode))
23599+ if (user_mode(regs)) {
23600+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23601+ return 0;
23602+ } else if (probe_kernel_address(instr, opcode))
23603 return 0;
23604
23605 *prefetch = (instr_lo == 0xF) &&
23606@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23607 while (instr < max_instr) {
23608 unsigned char opcode;
23609
23610- if (probe_kernel_address(instr, opcode))
23611+ if (user_mode(regs)) {
23612+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23613+ break;
23614+ } else if (probe_kernel_address(instr, opcode))
23615 break;
23616
23617 instr++;
23618@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23619 force_sig_info(si_signo, &info, tsk);
23620 }
23621
23622+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23623+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23624+#endif
23625+
23626+#ifdef CONFIG_PAX_EMUTRAMP
23627+static int pax_handle_fetch_fault(struct pt_regs *regs);
23628+#endif
23629+
23630+#ifdef CONFIG_PAX_PAGEEXEC
23631+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23632+{
23633+ pgd_t *pgd;
23634+ pud_t *pud;
23635+ pmd_t *pmd;
23636+
23637+ pgd = pgd_offset(mm, address);
23638+ if (!pgd_present(*pgd))
23639+ return NULL;
23640+ pud = pud_offset(pgd, address);
23641+ if (!pud_present(*pud))
23642+ return NULL;
23643+ pmd = pmd_offset(pud, address);
23644+ if (!pmd_present(*pmd))
23645+ return NULL;
23646+ return pmd;
23647+}
23648+#endif
23649+
23650 DEFINE_SPINLOCK(pgd_lock);
23651 LIST_HEAD(pgd_list);
23652
23653@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23654 for (address = VMALLOC_START & PMD_MASK;
23655 address >= TASK_SIZE && address < FIXADDR_TOP;
23656 address += PMD_SIZE) {
23657+
23658+#ifdef CONFIG_PAX_PER_CPU_PGD
23659+ unsigned long cpu;
23660+#else
23661 struct page *page;
23662+#endif
23663
23664 spin_lock(&pgd_lock);
23665+
23666+#ifdef CONFIG_PAX_PER_CPU_PGD
23667+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23668+ pgd_t *pgd = get_cpu_pgd(cpu);
23669+ pmd_t *ret;
23670+#else
23671 list_for_each_entry(page, &pgd_list, lru) {
23672+ pgd_t *pgd = page_address(page);
23673 spinlock_t *pgt_lock;
23674 pmd_t *ret;
23675
23676@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23677 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23678
23679 spin_lock(pgt_lock);
23680- ret = vmalloc_sync_one(page_address(page), address);
23681+#endif
23682+
23683+ ret = vmalloc_sync_one(pgd, address);
23684+
23685+#ifndef CONFIG_PAX_PER_CPU_PGD
23686 spin_unlock(pgt_lock);
23687+#endif
23688
23689 if (!ret)
23690 break;
23691@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23692 * an interrupt in the middle of a task switch..
23693 */
23694 pgd_paddr = read_cr3();
23695+
23696+#ifdef CONFIG_PAX_PER_CPU_PGD
23697+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23698+#endif
23699+
23700 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23701 if (!pmd_k)
23702 return -1;
23703@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23704 * happen within a race in page table update. In the later
23705 * case just flush:
23706 */
23707+
23708+#ifdef CONFIG_PAX_PER_CPU_PGD
23709+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23710+ pgd = pgd_offset_cpu(smp_processor_id(), address);
23711+#else
23712 pgd = pgd_offset(current->active_mm, address);
23713+#endif
23714+
23715 pgd_ref = pgd_offset_k(address);
23716 if (pgd_none(*pgd_ref))
23717 return -1;
23718@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23719 static int is_errata100(struct pt_regs *regs, unsigned long address)
23720 {
23721 #ifdef CONFIG_X86_64
23722- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23723+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23724 return 1;
23725 #endif
23726 return 0;
23727@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23728 }
23729
23730 static const char nx_warning[] = KERN_CRIT
23731-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23732+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23733
23734 static void
23735 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23736@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23737 if (!oops_may_print())
23738 return;
23739
23740- if (error_code & PF_INSTR) {
23741+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23742 unsigned int level;
23743
23744 pte_t *pte = lookup_address(address, &level);
23745
23746 if (pte && pte_present(*pte) && !pte_exec(*pte))
23747- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
23748+ printk(nx_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
23749 }
23750
23751+#ifdef CONFIG_PAX_KERNEXEC
23752+ if (init_mm.start_code <= address && address < init_mm.end_code) {
23753+ if (current->signal->curr_ip)
23754+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23755+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23756+ else
23757+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23758+ current->comm, task_pid_nr(current), current_uid(), current_euid());
23759+ }
23760+#endif
23761+
23762 printk(KERN_ALERT "BUG: unable to handle kernel ");
23763 if (address < PAGE_SIZE)
23764 printk(KERN_CONT "NULL pointer dereference");
23765@@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23766 }
23767 #endif
23768
23769+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23770+ if (pax_is_fetch_fault(regs, error_code, address)) {
23771+
23772+#ifdef CONFIG_PAX_EMUTRAMP
23773+ switch (pax_handle_fetch_fault(regs)) {
23774+ case 2:
23775+ return;
23776+ }
23777+#endif
23778+
23779+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23780+ do_group_exit(SIGKILL);
23781+ }
23782+#endif
23783+
23784 if (unlikely(show_unhandled_signals))
23785 show_signal_msg(regs, error_code, address, tsk);
23786
23787@@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23788 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23789 printk(KERN_ERR
23790 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23791- tsk->comm, tsk->pid, address);
23792+ tsk->comm, task_pid_nr(tsk), address);
23793 code = BUS_MCEERR_AR;
23794 }
23795 #endif
23796@@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23797 return 1;
23798 }
23799
23800+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23801+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23802+{
23803+ pte_t *pte;
23804+ pmd_t *pmd;
23805+ spinlock_t *ptl;
23806+ unsigned char pte_mask;
23807+
23808+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23809+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
23810+ return 0;
23811+
23812+ /* PaX: it's our fault, let's handle it if we can */
23813+
23814+ /* PaX: take a look at read faults before acquiring any locks */
23815+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23816+ /* instruction fetch attempt from a protected page in user mode */
23817+ up_read(&mm->mmap_sem);
23818+
23819+#ifdef CONFIG_PAX_EMUTRAMP
23820+ switch (pax_handle_fetch_fault(regs)) {
23821+ case 2:
23822+ return 1;
23823+ }
23824+#endif
23825+
23826+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23827+ do_group_exit(SIGKILL);
23828+ }
23829+
23830+ pmd = pax_get_pmd(mm, address);
23831+ if (unlikely(!pmd))
23832+ return 0;
23833+
23834+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23835+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23836+ pte_unmap_unlock(pte, ptl);
23837+ return 0;
23838+ }
23839+
23840+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23841+ /* write attempt to a protected page in user mode */
23842+ pte_unmap_unlock(pte, ptl);
23843+ return 0;
23844+ }
23845+
23846+#ifdef CONFIG_SMP
23847+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23848+#else
23849+ if (likely(address > get_limit(regs->cs)))
23850+#endif
23851+ {
23852+ set_pte(pte, pte_mkread(*pte));
23853+ __flush_tlb_one(address);
23854+ pte_unmap_unlock(pte, ptl);
23855+ up_read(&mm->mmap_sem);
23856+ return 1;
23857+ }
23858+
23859+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23860+
23861+ /*
23862+ * PaX: fill DTLB with user rights and retry
23863+ */
23864+ __asm__ __volatile__ (
23865+ "orb %2,(%1)\n"
23866+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23867+/*
23868+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23869+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23870+ * page fault when examined during a TLB load attempt. this is true not only
23871+ * for PTEs holding a non-present entry but also present entries that will
23872+ * raise a page fault (such as those set up by PaX, or the copy-on-write
23873+ * mechanism). in effect it means that we do *not* need to flush the TLBs
23874+ * for our target pages since their PTEs are simply not in the TLBs at all.
23875+
23876+ * the best thing in omitting it is that we gain around 15-20% speed in the
23877+ * fast path of the page fault handler and can get rid of tracing since we
23878+ * can no longer flush unintended entries.
23879+ */
23880+ "invlpg (%0)\n"
23881+#endif
23882+ __copyuser_seg"testb $0,(%0)\n"
23883+ "xorb %3,(%1)\n"
23884+ :
23885+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23886+ : "memory", "cc");
23887+ pte_unmap_unlock(pte, ptl);
23888+ up_read(&mm->mmap_sem);
23889+ return 1;
23890+}
23891+#endif
23892+
23893 /*
23894 * Handle a spurious fault caused by a stale TLB entry.
23895 *
23896@@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23897 static inline int
23898 access_error(unsigned long error_code, struct vm_area_struct *vma)
23899 {
23900+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23901+ return 1;
23902+
23903 if (error_code & PF_WRITE) {
23904 /* write, present and write, not present: */
23905 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23906@@ -1005,18 +1197,33 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23907 {
23908 struct vm_area_struct *vma;
23909 struct task_struct *tsk;
23910- unsigned long address;
23911 struct mm_struct *mm;
23912 int fault;
23913 int write = error_code & PF_WRITE;
23914 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23915 (write ? FAULT_FLAG_WRITE : 0);
23916
23917- tsk = current;
23918- mm = tsk->mm;
23919-
23920 /* Get the faulting address: */
23921- address = read_cr2();
23922+ unsigned long address = read_cr2();
23923+
23924+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23925+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23926+ if (!search_exception_tables(regs->ip)) {
23927+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23928+ bad_area_nosemaphore(regs, error_code, address);
23929+ return;
23930+ }
23931+ if (address < PAX_USER_SHADOW_BASE) {
23932+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23933+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23934+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23935+ } else
23936+ address -= PAX_USER_SHADOW_BASE;
23937+ }
23938+#endif
23939+
23940+ tsk = current;
23941+ mm = tsk->mm;
23942
23943 /*
23944 * Detect and handle instructions that would cause a page fault for
23945@@ -1077,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23946 * User-mode registers count as a user access even for any
23947 * potential system fault or CPU buglet:
23948 */
23949- if (user_mode_vm(regs)) {
23950+ if (user_mode(regs)) {
23951 local_irq_enable();
23952 error_code |= PF_USER;
23953 } else {
23954@@ -1132,6 +1339,11 @@ retry:
23955 might_sleep();
23956 }
23957
23958+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23959+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23960+ return;
23961+#endif
23962+
23963 vma = find_vma(mm, address);
23964 if (unlikely(!vma)) {
23965 bad_area(regs, error_code, address);
23966@@ -1143,18 +1355,24 @@ retry:
23967 bad_area(regs, error_code, address);
23968 return;
23969 }
23970- if (error_code & PF_USER) {
23971- /*
23972- * Accessing the stack below %sp is always a bug.
23973- * The large cushion allows instructions like enter
23974- * and pusha to work. ("enter $65535, $31" pushes
23975- * 32 pointers and then decrements %sp by 65535.)
23976- */
23977- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
23978- bad_area(regs, error_code, address);
23979- return;
23980- }
23981+ /*
23982+ * Accessing the stack below %sp is always a bug.
23983+ * The large cushion allows instructions like enter
23984+ * and pusha to work. ("enter $65535, $31" pushes
23985+ * 32 pointers and then decrements %sp by 65535.)
23986+ */
23987+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
23988+ bad_area(regs, error_code, address);
23989+ return;
23990 }
23991+
23992+#ifdef CONFIG_PAX_SEGMEXEC
23993+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
23994+ bad_area(regs, error_code, address);
23995+ return;
23996+ }
23997+#endif
23998+
23999 if (unlikely(expand_stack(vma, address))) {
24000 bad_area(regs, error_code, address);
24001 return;
24002@@ -1209,3 +1427,292 @@ good_area:
24003
24004 up_read(&mm->mmap_sem);
24005 }
24006+
24007+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24008+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24009+{
24010+ struct mm_struct *mm = current->mm;
24011+ unsigned long ip = regs->ip;
24012+
24013+ if (v8086_mode(regs))
24014+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24015+
24016+#ifdef CONFIG_PAX_PAGEEXEC
24017+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24018+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24019+ return true;
24020+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24021+ return true;
24022+ return false;
24023+ }
24024+#endif
24025+
24026+#ifdef CONFIG_PAX_SEGMEXEC
24027+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24028+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24029+ return true;
24030+ return false;
24031+ }
24032+#endif
24033+
24034+ return false;
24035+}
24036+#endif
24037+
24038+#ifdef CONFIG_PAX_EMUTRAMP
24039+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24040+{
24041+ int err;
24042+
24043+ do { /* PaX: libffi trampoline emulation */
24044+ unsigned char mov, jmp;
24045+ unsigned int addr1, addr2;
24046+
24047+#ifdef CONFIG_X86_64
24048+ if ((regs->ip + 9) >> 32)
24049+ break;
24050+#endif
24051+
24052+ err = get_user(mov, (unsigned char __user *)regs->ip);
24053+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24054+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24055+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24056+
24057+ if (err)
24058+ break;
24059+
24060+ if (mov == 0xB8 && jmp == 0xE9) {
24061+ regs->ax = addr1;
24062+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24063+ return 2;
24064+ }
24065+ } while (0);
24066+
24067+ do { /* PaX: gcc trampoline emulation #1 */
24068+ unsigned char mov1, mov2;
24069+ unsigned short jmp;
24070+ unsigned int addr1, addr2;
24071+
24072+#ifdef CONFIG_X86_64
24073+ if ((regs->ip + 11) >> 32)
24074+ break;
24075+#endif
24076+
24077+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24078+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24079+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24080+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24081+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24082+
24083+ if (err)
24084+ break;
24085+
24086+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24087+ regs->cx = addr1;
24088+ regs->ax = addr2;
24089+ regs->ip = addr2;
24090+ return 2;
24091+ }
24092+ } while (0);
24093+
24094+ do { /* PaX: gcc trampoline emulation #2 */
24095+ unsigned char mov, jmp;
24096+ unsigned int addr1, addr2;
24097+
24098+#ifdef CONFIG_X86_64
24099+ if ((regs->ip + 9) >> 32)
24100+ break;
24101+#endif
24102+
24103+ err = get_user(mov, (unsigned char __user *)regs->ip);
24104+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24105+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24106+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24107+
24108+ if (err)
24109+ break;
24110+
24111+ if (mov == 0xB9 && jmp == 0xE9) {
24112+ regs->cx = addr1;
24113+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24114+ return 2;
24115+ }
24116+ } while (0);
24117+
24118+ return 1; /* PaX in action */
24119+}
24120+
24121+#ifdef CONFIG_X86_64
24122+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24123+{
24124+ int err;
24125+
24126+ do { /* PaX: libffi trampoline emulation */
24127+ unsigned short mov1, mov2, jmp1;
24128+ unsigned char stcclc, jmp2;
24129+ unsigned long addr1, addr2;
24130+
24131+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24132+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24133+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24134+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24135+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24136+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24137+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24138+
24139+ if (err)
24140+ break;
24141+
24142+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24143+ regs->r11 = addr1;
24144+ regs->r10 = addr2;
24145+ if (stcclc == 0xF8)
24146+ regs->flags &= ~X86_EFLAGS_CF;
24147+ else
24148+ regs->flags |= X86_EFLAGS_CF;
24149+ regs->ip = addr1;
24150+ return 2;
24151+ }
24152+ } while (0);
24153+
24154+ do { /* PaX: gcc trampoline emulation #1 */
24155+ unsigned short mov1, mov2, jmp1;
24156+ unsigned char jmp2;
24157+ unsigned int addr1;
24158+ unsigned long addr2;
24159+
24160+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24161+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24162+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24163+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24164+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24165+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24166+
24167+ if (err)
24168+ break;
24169+
24170+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24171+ regs->r11 = addr1;
24172+ regs->r10 = addr2;
24173+ regs->ip = addr1;
24174+ return 2;
24175+ }
24176+ } while (0);
24177+
24178+ do { /* PaX: gcc trampoline emulation #2 */
24179+ unsigned short mov1, mov2, jmp1;
24180+ unsigned char jmp2;
24181+ unsigned long addr1, addr2;
24182+
24183+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24184+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24185+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24186+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24187+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24188+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24189+
24190+ if (err)
24191+ break;
24192+
24193+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24194+ regs->r11 = addr1;
24195+ regs->r10 = addr2;
24196+ regs->ip = addr1;
24197+ return 2;
24198+ }
24199+ } while (0);
24200+
24201+ return 1; /* PaX in action */
24202+}
24203+#endif
24204+
24205+/*
24206+ * PaX: decide what to do with offenders (regs->ip = fault address)
24207+ *
24208+ * returns 1 when task should be killed
24209+ * 2 when gcc trampoline was detected
24210+ */
24211+static int pax_handle_fetch_fault(struct pt_regs *regs)
24212+{
24213+ if (v8086_mode(regs))
24214+ return 1;
24215+
24216+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24217+ return 1;
24218+
24219+#ifdef CONFIG_X86_32
24220+ return pax_handle_fetch_fault_32(regs);
24221+#else
24222+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24223+ return pax_handle_fetch_fault_32(regs);
24224+ else
24225+ return pax_handle_fetch_fault_64(regs);
24226+#endif
24227+}
24228+#endif
24229+
24230+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24231+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24232+{
24233+ long i;
24234+
24235+ printk(KERN_ERR "PAX: bytes at PC: ");
24236+ for (i = 0; i < 20; i++) {
24237+ unsigned char c;
24238+ if (get_user(c, (unsigned char __force_user *)pc+i))
24239+ printk(KERN_CONT "?? ");
24240+ else
24241+ printk(KERN_CONT "%02x ", c);
24242+ }
24243+ printk("\n");
24244+
24245+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24246+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
24247+ unsigned long c;
24248+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
24249+#ifdef CONFIG_X86_32
24250+ printk(KERN_CONT "???????? ");
24251+#else
24252+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24253+ printk(KERN_CONT "???????? ???????? ");
24254+ else
24255+ printk(KERN_CONT "???????????????? ");
24256+#endif
24257+ } else {
24258+#ifdef CONFIG_X86_64
24259+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24260+ printk(KERN_CONT "%08x ", (unsigned int)c);
24261+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24262+ } else
24263+#endif
24264+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24265+ }
24266+ }
24267+ printk("\n");
24268+}
24269+#endif
24270+
24271+/**
24272+ * probe_kernel_write(): safely attempt to write to a location
24273+ * @dst: address to write to
24274+ * @src: pointer to the data that shall be written
24275+ * @size: size of the data chunk
24276+ *
24277+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24278+ * happens, handle that and return -EFAULT.
24279+ */
24280+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24281+{
24282+ long ret;
24283+ mm_segment_t old_fs = get_fs();
24284+
24285+ set_fs(KERNEL_DS);
24286+ pagefault_disable();
24287+ pax_open_kernel();
24288+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24289+ pax_close_kernel();
24290+ pagefault_enable();
24291+ set_fs(old_fs);
24292+
24293+ return ret ? -EFAULT : 0;
24294+}
24295diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24296index dd74e46..7d26398 100644
24297--- a/arch/x86/mm/gup.c
24298+++ b/arch/x86/mm/gup.c
24299@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24300 addr = start;
24301 len = (unsigned long) nr_pages << PAGE_SHIFT;
24302 end = start + len;
24303- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24304+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24305 (void __user *)start, len)))
24306 return 0;
24307
24308diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24309index 6f31ee5..8ee4164 100644
24310--- a/arch/x86/mm/highmem_32.c
24311+++ b/arch/x86/mm/highmem_32.c
24312@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24313 idx = type + KM_TYPE_NR*smp_processor_id();
24314 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24315 BUG_ON(!pte_none(*(kmap_pte-idx)));
24316+
24317+ pax_open_kernel();
24318 set_pte(kmap_pte-idx, mk_pte(page, prot));
24319+ pax_close_kernel();
24320+
24321 arch_flush_lazy_mmu_mode();
24322
24323 return (void *)vaddr;
24324diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24325index b91e485..d00e7c9 100644
24326--- a/arch/x86/mm/hugetlbpage.c
24327+++ b/arch/x86/mm/hugetlbpage.c
24328@@ -277,13 +277,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24329 struct hstate *h = hstate_file(file);
24330 struct mm_struct *mm = current->mm;
24331 struct vm_area_struct *vma;
24332- unsigned long start_addr;
24333+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24334+
24335+#ifdef CONFIG_PAX_SEGMEXEC
24336+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24337+ pax_task_size = SEGMEXEC_TASK_SIZE;
24338+#endif
24339+
24340+ pax_task_size -= PAGE_SIZE;
24341
24342 if (len > mm->cached_hole_size) {
24343- start_addr = mm->free_area_cache;
24344+ start_addr = mm->free_area_cache;
24345 } else {
24346- start_addr = TASK_UNMAPPED_BASE;
24347- mm->cached_hole_size = 0;
24348+ start_addr = mm->mmap_base;
24349+ mm->cached_hole_size = 0;
24350 }
24351
24352 full_search:
24353@@ -291,26 +298,27 @@ full_search:
24354
24355 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24356 /* At this point: (!vma || addr < vma->vm_end). */
24357- if (TASK_SIZE - len < addr) {
24358+ if (pax_task_size - len < addr) {
24359 /*
24360 * Start a new search - just in case we missed
24361 * some holes.
24362 */
24363- if (start_addr != TASK_UNMAPPED_BASE) {
24364- start_addr = TASK_UNMAPPED_BASE;
24365+ if (start_addr != mm->mmap_base) {
24366+ start_addr = mm->mmap_base;
24367 mm->cached_hole_size = 0;
24368 goto full_search;
24369 }
24370 return -ENOMEM;
24371 }
24372- if (!vma || addr + len <= vma->vm_start) {
24373- mm->free_area_cache = addr + len;
24374- return addr;
24375- }
24376+ if (check_heap_stack_gap(vma, addr, len))
24377+ break;
24378 if (addr + mm->cached_hole_size < vma->vm_start)
24379 mm->cached_hole_size = vma->vm_start - addr;
24380 addr = ALIGN(vma->vm_end, huge_page_size(h));
24381 }
24382+
24383+ mm->free_area_cache = addr + len;
24384+ return addr;
24385 }
24386
24387 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24388@@ -321,9 +329,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24389 struct mm_struct *mm = current->mm;
24390 struct vm_area_struct *vma;
24391 unsigned long base = mm->mmap_base;
24392- unsigned long addr = addr0;
24393+ unsigned long addr;
24394 unsigned long largest_hole = mm->cached_hole_size;
24395- unsigned long start_addr;
24396
24397 /* don't allow allocations above current base */
24398 if (mm->free_area_cache > base)
24399@@ -333,16 +340,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24400 largest_hole = 0;
24401 mm->free_area_cache = base;
24402 }
24403-try_again:
24404- start_addr = mm->free_area_cache;
24405
24406 /* make sure it can fit in the remaining address space */
24407 if (mm->free_area_cache < len)
24408 goto fail;
24409
24410 /* either no address requested or can't fit in requested address hole */
24411- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24412+ addr = mm->free_area_cache - len;
24413 do {
24414+ addr &= huge_page_mask(h);
24415 /*
24416 * Lookup failure means no vma is above this address,
24417 * i.e. return with success:
24418@@ -351,10 +357,10 @@ try_again:
24419 if (!vma)
24420 return addr;
24421
24422- if (addr + len <= vma->vm_start) {
24423+ if (check_heap_stack_gap(vma, addr, len)) {
24424 /* remember the address as a hint for next time */
24425- mm->cached_hole_size = largest_hole;
24426- return (mm->free_area_cache = addr);
24427+ mm->cached_hole_size = largest_hole;
24428+ return (mm->free_area_cache = addr);
24429 } else if (mm->free_area_cache == vma->vm_end) {
24430 /* pull free_area_cache down to the first hole */
24431 mm->free_area_cache = vma->vm_start;
24432@@ -363,29 +369,34 @@ try_again:
24433
24434 /* remember the largest hole we saw so far */
24435 if (addr + largest_hole < vma->vm_start)
24436- largest_hole = vma->vm_start - addr;
24437+ largest_hole = vma->vm_start - addr;
24438
24439 /* try just below the current vma->vm_start */
24440- addr = (vma->vm_start - len) & huge_page_mask(h);
24441- } while (len <= vma->vm_start);
24442+ addr = skip_heap_stack_gap(vma, len);
24443+ } while (!IS_ERR_VALUE(addr));
24444
24445 fail:
24446 /*
24447- * if hint left us with no space for the requested
24448- * mapping then try again:
24449- */
24450- if (start_addr != base) {
24451- mm->free_area_cache = base;
24452- largest_hole = 0;
24453- goto try_again;
24454- }
24455- /*
24456 * A failed mmap() very likely causes application failure,
24457 * so fall back to the bottom-up function here. This scenario
24458 * can happen with large stack limits and large mmap()
24459 * allocations.
24460 */
24461- mm->free_area_cache = TASK_UNMAPPED_BASE;
24462+
24463+#ifdef CONFIG_PAX_SEGMEXEC
24464+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24465+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24466+ else
24467+#endif
24468+
24469+ mm->mmap_base = TASK_UNMAPPED_BASE;
24470+
24471+#ifdef CONFIG_PAX_RANDMMAP
24472+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24473+ mm->mmap_base += mm->delta_mmap;
24474+#endif
24475+
24476+ mm->free_area_cache = mm->mmap_base;
24477 mm->cached_hole_size = ~0UL;
24478 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24479 len, pgoff, flags);
24480@@ -393,6 +404,7 @@ fail:
24481 /*
24482 * Restore the topdown base:
24483 */
24484+ mm->mmap_base = base;
24485 mm->free_area_cache = base;
24486 mm->cached_hole_size = ~0UL;
24487
24488@@ -406,10 +418,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24489 struct hstate *h = hstate_file(file);
24490 struct mm_struct *mm = current->mm;
24491 struct vm_area_struct *vma;
24492+ unsigned long pax_task_size = TASK_SIZE;
24493
24494 if (len & ~huge_page_mask(h))
24495 return -EINVAL;
24496- if (len > TASK_SIZE)
24497+
24498+#ifdef CONFIG_PAX_SEGMEXEC
24499+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24500+ pax_task_size = SEGMEXEC_TASK_SIZE;
24501+#endif
24502+
24503+ pax_task_size -= PAGE_SIZE;
24504+
24505+ if (len > pax_task_size)
24506 return -ENOMEM;
24507
24508 if (flags & MAP_FIXED) {
24509@@ -421,8 +442,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24510 if (addr) {
24511 addr = ALIGN(addr, huge_page_size(h));
24512 vma = find_vma(mm, addr);
24513- if (TASK_SIZE - len >= addr &&
24514- (!vma || addr + len <= vma->vm_start))
24515+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24516 return addr;
24517 }
24518 if (mm->get_unmapped_area == arch_get_unmapped_area)
24519diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24520index bc4e9d8..ca4c14b 100644
24521--- a/arch/x86/mm/init.c
24522+++ b/arch/x86/mm/init.c
24523@@ -16,6 +16,8 @@
24524 #include <asm/tlb.h>
24525 #include <asm/proto.h>
24526 #include <asm/dma.h> /* for MAX_DMA_PFN */
24527+#include <asm/desc.h>
24528+#include <asm/bios_ebda.h>
24529
24530 unsigned long __initdata pgt_buf_start;
24531 unsigned long __meminitdata pgt_buf_end;
24532@@ -38,7 +40,7 @@ struct map_range {
24533 static void __init find_early_table_space(struct map_range *mr, unsigned long end,
24534 int use_pse, int use_gbpages)
24535 {
24536- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24537+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24538 phys_addr_t base;
24539
24540 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24541@@ -317,10 +319,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24542 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24543 * mmio resources as well as potential bios/acpi data regions.
24544 */
24545+
24546+#ifdef CONFIG_GRKERNSEC_KMEM
24547+static unsigned int ebda_start __read_only;
24548+static unsigned int ebda_end __read_only;
24549+#endif
24550+
24551 int devmem_is_allowed(unsigned long pagenr)
24552 {
24553+#ifdef CONFIG_GRKERNSEC_KMEM
24554+ /* allow BDA */
24555+ if (!pagenr)
24556+ return 1;
24557+ /* allow EBDA */
24558+ if (pagenr >= ebda_start && pagenr < ebda_end)
24559+ return 1;
24560+#else
24561+ if (!pagenr)
24562+ return 1;
24563+#ifdef CONFIG_VM86
24564+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24565+ return 1;
24566+#endif
24567+#endif
24568+
24569+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24570+ return 1;
24571+#ifdef CONFIG_GRKERNSEC_KMEM
24572+ /* throw out everything else below 1MB */
24573 if (pagenr <= 256)
24574- return 1;
24575+ return 0;
24576+#endif
24577 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24578 return 0;
24579 if (!page_is_ram(pagenr))
24580@@ -377,8 +406,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24581 #endif
24582 }
24583
24584+#ifdef CONFIG_GRKERNSEC_KMEM
24585+static inline void gr_init_ebda(void)
24586+{
24587+ unsigned int ebda_addr;
24588+ unsigned int ebda_size = 0;
24589+
24590+ ebda_addr = get_bios_ebda();
24591+ if (ebda_addr) {
24592+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
24593+ ebda_size <<= 10;
24594+ }
24595+ if (ebda_addr && ebda_size) {
24596+ ebda_start = ebda_addr >> PAGE_SHIFT;
24597+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
24598+ } else {
24599+ ebda_start = 0x9f000 >> PAGE_SHIFT;
24600+ ebda_end = 0xa0000 >> PAGE_SHIFT;
24601+ }
24602+}
24603+#else
24604+static inline void gr_init_ebda(void) { }
24605+#endif
24606+
24607 void free_initmem(void)
24608 {
24609+#ifdef CONFIG_PAX_KERNEXEC
24610+#ifdef CONFIG_X86_32
24611+ /* PaX: limit KERNEL_CS to actual size */
24612+ unsigned long addr, limit;
24613+ struct desc_struct d;
24614+ int cpu;
24615+#else
24616+ pgd_t *pgd;
24617+ pud_t *pud;
24618+ pmd_t *pmd;
24619+ unsigned long addr, end;
24620+#endif
24621+#endif
24622+
24623+ gr_init_ebda();
24624+
24625+#ifdef CONFIG_PAX_KERNEXEC
24626+#ifdef CONFIG_X86_32
24627+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24628+ limit = (limit - 1UL) >> PAGE_SHIFT;
24629+
24630+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24631+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24632+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24633+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24634+ }
24635+
24636+ /* PaX: make KERNEL_CS read-only */
24637+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24638+ if (!paravirt_enabled())
24639+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24640+/*
24641+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24642+ pgd = pgd_offset_k(addr);
24643+ pud = pud_offset(pgd, addr);
24644+ pmd = pmd_offset(pud, addr);
24645+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24646+ }
24647+*/
24648+#ifdef CONFIG_X86_PAE
24649+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24650+/*
24651+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24652+ pgd = pgd_offset_k(addr);
24653+ pud = pud_offset(pgd, addr);
24654+ pmd = pmd_offset(pud, addr);
24655+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24656+ }
24657+*/
24658+#endif
24659+
24660+#ifdef CONFIG_MODULES
24661+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24662+#endif
24663+
24664+#else
24665+ /* PaX: make kernel code/rodata read-only, rest non-executable */
24666+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24667+ pgd = pgd_offset_k(addr);
24668+ pud = pud_offset(pgd, addr);
24669+ pmd = pmd_offset(pud, addr);
24670+ if (!pmd_present(*pmd))
24671+ continue;
24672+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24673+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24674+ else
24675+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24676+ }
24677+
24678+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24679+ end = addr + KERNEL_IMAGE_SIZE;
24680+ for (; addr < end; addr += PMD_SIZE) {
24681+ pgd = pgd_offset_k(addr);
24682+ pud = pud_offset(pgd, addr);
24683+ pmd = pmd_offset(pud, addr);
24684+ if (!pmd_present(*pmd))
24685+ continue;
24686+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24687+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24688+ }
24689+#endif
24690+
24691+ flush_tlb_all();
24692+#endif
24693+
24694 free_init_pages("unused kernel memory",
24695 (unsigned long)(&__init_begin),
24696 (unsigned long)(&__init_end));
24697diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24698index 575d86f..4987469 100644
24699--- a/arch/x86/mm/init_32.c
24700+++ b/arch/x86/mm/init_32.c
24701@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
24702 }
24703
24704 /*
24705- * Creates a middle page table and puts a pointer to it in the
24706- * given global directory entry. This only returns the gd entry
24707- * in non-PAE compilation mode, since the middle layer is folded.
24708- */
24709-static pmd_t * __init one_md_table_init(pgd_t *pgd)
24710-{
24711- pud_t *pud;
24712- pmd_t *pmd_table;
24713-
24714-#ifdef CONFIG_X86_PAE
24715- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24716- if (after_bootmem)
24717- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24718- else
24719- pmd_table = (pmd_t *)alloc_low_page();
24720- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24721- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24722- pud = pud_offset(pgd, 0);
24723- BUG_ON(pmd_table != pmd_offset(pud, 0));
24724-
24725- return pmd_table;
24726- }
24727-#endif
24728- pud = pud_offset(pgd, 0);
24729- pmd_table = pmd_offset(pud, 0);
24730-
24731- return pmd_table;
24732-}
24733-
24734-/*
24735 * Create a page table and place a pointer to it in a middle page
24736 * directory entry:
24737 */
24738@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24739 page_table = (pte_t *)alloc_low_page();
24740
24741 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24742+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24743+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24744+#else
24745 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24746+#endif
24747 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24748 }
24749
24750 return pte_offset_kernel(pmd, 0);
24751 }
24752
24753+static pmd_t * __init one_md_table_init(pgd_t *pgd)
24754+{
24755+ pud_t *pud;
24756+ pmd_t *pmd_table;
24757+
24758+ pud = pud_offset(pgd, 0);
24759+ pmd_table = pmd_offset(pud, 0);
24760+
24761+ return pmd_table;
24762+}
24763+
24764 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24765 {
24766 int pgd_idx = pgd_index(vaddr);
24767@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24768 int pgd_idx, pmd_idx;
24769 unsigned long vaddr;
24770 pgd_t *pgd;
24771+ pud_t *pud;
24772 pmd_t *pmd;
24773 pte_t *pte = NULL;
24774
24775@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24776 pgd = pgd_base + pgd_idx;
24777
24778 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24779- pmd = one_md_table_init(pgd);
24780- pmd = pmd + pmd_index(vaddr);
24781+ pud = pud_offset(pgd, vaddr);
24782+ pmd = pmd_offset(pud, vaddr);
24783+
24784+#ifdef CONFIG_X86_PAE
24785+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24786+#endif
24787+
24788 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24789 pmd++, pmd_idx++) {
24790 pte = page_table_kmap_check(one_page_table_init(pmd),
24791@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24792 }
24793 }
24794
24795-static inline int is_kernel_text(unsigned long addr)
24796+static inline int is_kernel_text(unsigned long start, unsigned long end)
24797 {
24798- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24799- return 1;
24800- return 0;
24801+ if ((start > ktla_ktva((unsigned long)_etext) ||
24802+ end <= ktla_ktva((unsigned long)_stext)) &&
24803+ (start > ktla_ktva((unsigned long)_einittext) ||
24804+ end <= ktla_ktva((unsigned long)_sinittext)) &&
24805+
24806+#ifdef CONFIG_ACPI_SLEEP
24807+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24808+#endif
24809+
24810+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24811+ return 0;
24812+ return 1;
24813 }
24814
24815 /*
24816@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
24817 unsigned long last_map_addr = end;
24818 unsigned long start_pfn, end_pfn;
24819 pgd_t *pgd_base = swapper_pg_dir;
24820- int pgd_idx, pmd_idx, pte_ofs;
24821+ unsigned int pgd_idx, pmd_idx, pte_ofs;
24822 unsigned long pfn;
24823 pgd_t *pgd;
24824+ pud_t *pud;
24825 pmd_t *pmd;
24826 pte_t *pte;
24827 unsigned pages_2m, pages_4k;
24828@@ -280,8 +281,13 @@ repeat:
24829 pfn = start_pfn;
24830 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24831 pgd = pgd_base + pgd_idx;
24832- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24833- pmd = one_md_table_init(pgd);
24834+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24835+ pud = pud_offset(pgd, 0);
24836+ pmd = pmd_offset(pud, 0);
24837+
24838+#ifdef CONFIG_X86_PAE
24839+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24840+#endif
24841
24842 if (pfn >= end_pfn)
24843 continue;
24844@@ -293,14 +299,13 @@ repeat:
24845 #endif
24846 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24847 pmd++, pmd_idx++) {
24848- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24849+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24850
24851 /*
24852 * Map with big pages if possible, otherwise
24853 * create normal page tables:
24854 */
24855 if (use_pse) {
24856- unsigned int addr2;
24857 pgprot_t prot = PAGE_KERNEL_LARGE;
24858 /*
24859 * first pass will use the same initial
24860@@ -310,11 +315,7 @@ repeat:
24861 __pgprot(PTE_IDENT_ATTR |
24862 _PAGE_PSE);
24863
24864- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24865- PAGE_OFFSET + PAGE_SIZE-1;
24866-
24867- if (is_kernel_text(addr) ||
24868- is_kernel_text(addr2))
24869+ if (is_kernel_text(address, address + PMD_SIZE))
24870 prot = PAGE_KERNEL_LARGE_EXEC;
24871
24872 pages_2m++;
24873@@ -331,7 +332,7 @@ repeat:
24874 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24875 pte += pte_ofs;
24876 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24877- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24878+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24879 pgprot_t prot = PAGE_KERNEL;
24880 /*
24881 * first pass will use the same initial
24882@@ -339,7 +340,7 @@ repeat:
24883 */
24884 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24885
24886- if (is_kernel_text(addr))
24887+ if (is_kernel_text(address, address + PAGE_SIZE))
24888 prot = PAGE_KERNEL_EXEC;
24889
24890 pages_4k++;
24891@@ -465,7 +466,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24892
24893 pud = pud_offset(pgd, va);
24894 pmd = pmd_offset(pud, va);
24895- if (!pmd_present(*pmd))
24896+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
24897 break;
24898
24899 pte = pte_offset_kernel(pmd, va);
24900@@ -517,12 +518,10 @@ void __init early_ioremap_page_table_range_init(void)
24901
24902 static void __init pagetable_init(void)
24903 {
24904- pgd_t *pgd_base = swapper_pg_dir;
24905-
24906- permanent_kmaps_init(pgd_base);
24907+ permanent_kmaps_init(swapper_pg_dir);
24908 }
24909
24910-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24911+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24912 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24913
24914 /* user-defined highmem size */
24915@@ -734,6 +733,12 @@ void __init mem_init(void)
24916
24917 pci_iommu_alloc();
24918
24919+#ifdef CONFIG_PAX_PER_CPU_PGD
24920+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24921+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24922+ KERNEL_PGD_PTRS);
24923+#endif
24924+
24925 #ifdef CONFIG_FLATMEM
24926 BUG_ON(!mem_map);
24927 #endif
24928@@ -760,7 +765,7 @@ void __init mem_init(void)
24929 reservedpages++;
24930
24931 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24932- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24933+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24934 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24935
24936 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24937@@ -801,10 +806,10 @@ void __init mem_init(void)
24938 ((unsigned long)&__init_end -
24939 (unsigned long)&__init_begin) >> 10,
24940
24941- (unsigned long)&_etext, (unsigned long)&_edata,
24942- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24943+ (unsigned long)&_sdata, (unsigned long)&_edata,
24944+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24945
24946- (unsigned long)&_text, (unsigned long)&_etext,
24947+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24948 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24949
24950 /*
24951@@ -882,6 +887,7 @@ void set_kernel_text_rw(void)
24952 if (!kernel_set_to_readonly)
24953 return;
24954
24955+ start = ktla_ktva(start);
24956 pr_debug("Set kernel text: %lx - %lx for read write\n",
24957 start, start+size);
24958
24959@@ -896,6 +902,7 @@ void set_kernel_text_ro(void)
24960 if (!kernel_set_to_readonly)
24961 return;
24962
24963+ start = ktla_ktva(start);
24964 pr_debug("Set kernel text: %lx - %lx for read only\n",
24965 start, start+size);
24966
24967@@ -924,6 +931,7 @@ void mark_rodata_ro(void)
24968 unsigned long start = PFN_ALIGN(_text);
24969 unsigned long size = PFN_ALIGN(_etext) - start;
24970
24971+ start = ktla_ktva(start);
24972 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
24973 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
24974 size >> 10);
24975diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
24976index 2b6b4a3..c17210d 100644
24977--- a/arch/x86/mm/init_64.c
24978+++ b/arch/x86/mm/init_64.c
24979@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
24980 * around without checking the pgd every time.
24981 */
24982
24983-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
24984+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
24985 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24986
24987 int force_personality32;
24988@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24989
24990 for (address = start; address <= end; address += PGDIR_SIZE) {
24991 const pgd_t *pgd_ref = pgd_offset_k(address);
24992+
24993+#ifdef CONFIG_PAX_PER_CPU_PGD
24994+ unsigned long cpu;
24995+#else
24996 struct page *page;
24997+#endif
24998
24999 if (pgd_none(*pgd_ref))
25000 continue;
25001
25002 spin_lock(&pgd_lock);
25003+
25004+#ifdef CONFIG_PAX_PER_CPU_PGD
25005+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25006+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
25007+#else
25008 list_for_each_entry(page, &pgd_list, lru) {
25009 pgd_t *pgd;
25010 spinlock_t *pgt_lock;
25011@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25012 /* the pgt_lock only for Xen */
25013 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25014 spin_lock(pgt_lock);
25015+#endif
25016
25017 if (pgd_none(*pgd))
25018 set_pgd(pgd, *pgd_ref);
25019@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25020 BUG_ON(pgd_page_vaddr(*pgd)
25021 != pgd_page_vaddr(*pgd_ref));
25022
25023+#ifndef CONFIG_PAX_PER_CPU_PGD
25024 spin_unlock(pgt_lock);
25025+#endif
25026+
25027 }
25028 spin_unlock(&pgd_lock);
25029 }
25030@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
25031 {
25032 if (pgd_none(*pgd)) {
25033 pud_t *pud = (pud_t *)spp_getpage();
25034- pgd_populate(&init_mm, pgd, pud);
25035+ pgd_populate_kernel(&init_mm, pgd, pud);
25036 if (pud != pud_offset(pgd, 0))
25037 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
25038 pud, pud_offset(pgd, 0));
25039@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
25040 {
25041 if (pud_none(*pud)) {
25042 pmd_t *pmd = (pmd_t *) spp_getpage();
25043- pud_populate(&init_mm, pud, pmd);
25044+ pud_populate_kernel(&init_mm, pud, pmd);
25045 if (pmd != pmd_offset(pud, 0))
25046 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
25047 pmd, pmd_offset(pud, 0));
25048@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25049 pmd = fill_pmd(pud, vaddr);
25050 pte = fill_pte(pmd, vaddr);
25051
25052+ pax_open_kernel();
25053 set_pte(pte, new_pte);
25054+ pax_close_kernel();
25055
25056 /*
25057 * It's enough to flush this one mapping.
25058@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25059 pgd = pgd_offset_k((unsigned long)__va(phys));
25060 if (pgd_none(*pgd)) {
25061 pud = (pud_t *) spp_getpage();
25062- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25063- _PAGE_USER));
25064+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25065 }
25066 pud = pud_offset(pgd, (unsigned long)__va(phys));
25067 if (pud_none(*pud)) {
25068 pmd = (pmd_t *) spp_getpage();
25069- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25070- _PAGE_USER));
25071+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25072 }
25073 pmd = pmd_offset(pud, phys);
25074 BUG_ON(!pmd_none(*pmd));
25075@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25076 if (pfn >= pgt_buf_top)
25077 panic("alloc_low_page: ran out of memory");
25078
25079- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25080+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25081 clear_page(adr);
25082 *phys = pfn * PAGE_SIZE;
25083 return adr;
25084@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
25085
25086 phys = __pa(virt);
25087 left = phys & (PAGE_SIZE - 1);
25088- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25089+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25090 adr = (void *)(((unsigned long)adr) | left);
25091
25092 return adr;
25093@@ -548,7 +562,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25094 unmap_low_page(pmd);
25095
25096 spin_lock(&init_mm.page_table_lock);
25097- pud_populate(&init_mm, pud, __va(pmd_phys));
25098+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25099 spin_unlock(&init_mm.page_table_lock);
25100 }
25101 __flush_tlb_all();
25102@@ -594,7 +608,7 @@ kernel_physical_mapping_init(unsigned long start,
25103 unmap_low_page(pud);
25104
25105 spin_lock(&init_mm.page_table_lock);
25106- pgd_populate(&init_mm, pgd, __va(pud_phys));
25107+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25108 spin_unlock(&init_mm.page_table_lock);
25109 pgd_changed = true;
25110 }
25111@@ -686,6 +700,12 @@ void __init mem_init(void)
25112
25113 pci_iommu_alloc();
25114
25115+#ifdef CONFIG_PAX_PER_CPU_PGD
25116+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25117+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25118+ KERNEL_PGD_PTRS);
25119+#endif
25120+
25121 /* clear_bss() already clear the empty_zero_page */
25122
25123 reservedpages = 0;
25124@@ -846,8 +866,8 @@ int kern_addr_valid(unsigned long addr)
25125 static struct vm_area_struct gate_vma = {
25126 .vm_start = VSYSCALL_START,
25127 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25128- .vm_page_prot = PAGE_READONLY_EXEC,
25129- .vm_flags = VM_READ | VM_EXEC
25130+ .vm_page_prot = PAGE_READONLY,
25131+ .vm_flags = VM_READ
25132 };
25133
25134 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
25135@@ -881,7 +901,7 @@ int in_gate_area_no_mm(unsigned long addr)
25136
25137 const char *arch_vma_name(struct vm_area_struct *vma)
25138 {
25139- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25140+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25141 return "[vdso]";
25142 if (vma == &gate_vma)
25143 return "[vsyscall]";
25144diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25145index 7b179b4..6bd1777 100644
25146--- a/arch/x86/mm/iomap_32.c
25147+++ b/arch/x86/mm/iomap_32.c
25148@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
25149 type = kmap_atomic_idx_push();
25150 idx = type + KM_TYPE_NR * smp_processor_id();
25151 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25152+
25153+ pax_open_kernel();
25154 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25155+ pax_close_kernel();
25156+
25157 arch_flush_lazy_mmu_mode();
25158
25159 return (void *)vaddr;
25160diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25161index 78fe3f1..8293b6f 100644
25162--- a/arch/x86/mm/ioremap.c
25163+++ b/arch/x86/mm/ioremap.c
25164@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25165 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25166 int is_ram = page_is_ram(pfn);
25167
25168- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25169+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25170 return NULL;
25171 WARN_ON_ONCE(is_ram);
25172 }
25173@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25174
25175 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25176 if (page_is_ram(start >> PAGE_SHIFT))
25177+#ifdef CONFIG_HIGHMEM
25178+ if ((start >> PAGE_SHIFT) < max_low_pfn)
25179+#endif
25180 return __va(phys);
25181
25182 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25183@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25184 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25185
25186 static __initdata int after_paging_init;
25187-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25188+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25189
25190 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25191 {
25192@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25193 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25194
25195 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25196- memset(bm_pte, 0, sizeof(bm_pte));
25197- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25198+ pmd_populate_user(&init_mm, pmd, bm_pte);
25199
25200 /*
25201 * The boot-ioremap range spans multiple pmds, for which
25202diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25203index d87dd6d..bf3fa66 100644
25204--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25205+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25206@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25207 * memory (e.g. tracked pages)? For now, we need this to avoid
25208 * invoking kmemcheck for PnP BIOS calls.
25209 */
25210- if (regs->flags & X86_VM_MASK)
25211+ if (v8086_mode(regs))
25212 return false;
25213- if (regs->cs != __KERNEL_CS)
25214+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25215 return false;
25216
25217 pte = kmemcheck_pte_lookup(address);
25218diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25219index 845df68..1d8d29f 100644
25220--- a/arch/x86/mm/mmap.c
25221+++ b/arch/x86/mm/mmap.c
25222@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
25223 * Leave an at least ~128 MB hole with possible stack randomization.
25224 */
25225 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25226-#define MAX_GAP (TASK_SIZE/6*5)
25227+#define MAX_GAP (pax_task_size/6*5)
25228
25229 static int mmap_is_legacy(void)
25230 {
25231@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25232 return rnd << PAGE_SHIFT;
25233 }
25234
25235-static unsigned long mmap_base(void)
25236+static unsigned long mmap_base(struct mm_struct *mm)
25237 {
25238 unsigned long gap = rlimit(RLIMIT_STACK);
25239+ unsigned long pax_task_size = TASK_SIZE;
25240+
25241+#ifdef CONFIG_PAX_SEGMEXEC
25242+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25243+ pax_task_size = SEGMEXEC_TASK_SIZE;
25244+#endif
25245
25246 if (gap < MIN_GAP)
25247 gap = MIN_GAP;
25248 else if (gap > MAX_GAP)
25249 gap = MAX_GAP;
25250
25251- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25252+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25253 }
25254
25255 /*
25256 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25257 * does, but not when emulating X86_32
25258 */
25259-static unsigned long mmap_legacy_base(void)
25260+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25261 {
25262- if (mmap_is_ia32())
25263+ if (mmap_is_ia32()) {
25264+
25265+#ifdef CONFIG_PAX_SEGMEXEC
25266+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25267+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25268+ else
25269+#endif
25270+
25271 return TASK_UNMAPPED_BASE;
25272- else
25273+ } else
25274 return TASK_UNMAPPED_BASE + mmap_rnd();
25275 }
25276
25277@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25278 void arch_pick_mmap_layout(struct mm_struct *mm)
25279 {
25280 if (mmap_is_legacy()) {
25281- mm->mmap_base = mmap_legacy_base();
25282+ mm->mmap_base = mmap_legacy_base(mm);
25283+
25284+#ifdef CONFIG_PAX_RANDMMAP
25285+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25286+ mm->mmap_base += mm->delta_mmap;
25287+#endif
25288+
25289 mm->get_unmapped_area = arch_get_unmapped_area;
25290 mm->unmap_area = arch_unmap_area;
25291 } else {
25292- mm->mmap_base = mmap_base();
25293+ mm->mmap_base = mmap_base(mm);
25294+
25295+#ifdef CONFIG_PAX_RANDMMAP
25296+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25297+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25298+#endif
25299+
25300 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25301 mm->unmap_area = arch_unmap_area_topdown;
25302 }
25303diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25304index dc0b727..dc9d71a 100644
25305--- a/arch/x86/mm/mmio-mod.c
25306+++ b/arch/x86/mm/mmio-mod.c
25307@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25308 break;
25309 default:
25310 {
25311- unsigned char *ip = (unsigned char *)instptr;
25312+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25313 my_trace->opcode = MMIO_UNKNOWN_OP;
25314 my_trace->width = 0;
25315 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25316@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25317 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25318 void __iomem *addr)
25319 {
25320- static atomic_t next_id;
25321+ static atomic_unchecked_t next_id;
25322 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25323 /* These are page-unaligned. */
25324 struct mmiotrace_map map = {
25325@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25326 .private = trace
25327 },
25328 .phys = offset,
25329- .id = atomic_inc_return(&next_id)
25330+ .id = atomic_inc_return_unchecked(&next_id)
25331 };
25332 map.map_id = trace->id;
25333
25334diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25335index b008656..773eac2 100644
25336--- a/arch/x86/mm/pageattr-test.c
25337+++ b/arch/x86/mm/pageattr-test.c
25338@@ -36,7 +36,7 @@ enum {
25339
25340 static int pte_testbit(pte_t pte)
25341 {
25342- return pte_flags(pte) & _PAGE_UNUSED1;
25343+ return pte_flags(pte) & _PAGE_CPA_TEST;
25344 }
25345
25346 struct split_state {
25347diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25348index a718e0d..45efc32 100644
25349--- a/arch/x86/mm/pageattr.c
25350+++ b/arch/x86/mm/pageattr.c
25351@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25352 */
25353 #ifdef CONFIG_PCI_BIOS
25354 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25355- pgprot_val(forbidden) |= _PAGE_NX;
25356+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25357 #endif
25358
25359 /*
25360@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25361 * Does not cover __inittext since that is gone later on. On
25362 * 64bit we do not enforce !NX on the low mapping
25363 */
25364- if (within(address, (unsigned long)_text, (unsigned long)_etext))
25365- pgprot_val(forbidden) |= _PAGE_NX;
25366+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25367+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25368
25369+#ifdef CONFIG_DEBUG_RODATA
25370 /*
25371 * The .rodata section needs to be read-only. Using the pfn
25372 * catches all aliases.
25373@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25374 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25375 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25376 pgprot_val(forbidden) |= _PAGE_RW;
25377+#endif
25378
25379 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25380 /*
25381@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25382 }
25383 #endif
25384
25385+#ifdef CONFIG_PAX_KERNEXEC
25386+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25387+ pgprot_val(forbidden) |= _PAGE_RW;
25388+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25389+ }
25390+#endif
25391+
25392 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25393
25394 return prot;
25395@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25396 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25397 {
25398 /* change init_mm */
25399+ pax_open_kernel();
25400 set_pte_atomic(kpte, pte);
25401+
25402 #ifdef CONFIG_X86_32
25403 if (!SHARED_KERNEL_PMD) {
25404+
25405+#ifdef CONFIG_PAX_PER_CPU_PGD
25406+ unsigned long cpu;
25407+#else
25408 struct page *page;
25409+#endif
25410
25411+#ifdef CONFIG_PAX_PER_CPU_PGD
25412+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25413+ pgd_t *pgd = get_cpu_pgd(cpu);
25414+#else
25415 list_for_each_entry(page, &pgd_list, lru) {
25416- pgd_t *pgd;
25417+ pgd_t *pgd = (pgd_t *)page_address(page);
25418+#endif
25419+
25420 pud_t *pud;
25421 pmd_t *pmd;
25422
25423- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25424+ pgd += pgd_index(address);
25425 pud = pud_offset(pgd, address);
25426 pmd = pmd_offset(pud, address);
25427 set_pte_atomic((pte_t *)pmd, pte);
25428 }
25429 }
25430 #endif
25431+ pax_close_kernel();
25432 }
25433
25434 static int
25435diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25436index 3d68ef6..7f69136 100644
25437--- a/arch/x86/mm/pat.c
25438+++ b/arch/x86/mm/pat.c
25439@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
25440
25441 if (!entry) {
25442 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
25443- current->comm, current->pid, start, end - 1);
25444+ current->comm, task_pid_nr(current), start, end - 1);
25445 return -EINVAL;
25446 }
25447
25448@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25449
25450 while (cursor < to) {
25451 if (!devmem_is_allowed(pfn)) {
25452- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
25453- current->comm, from, to - 1);
25454+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
25455+ current->comm, from, to - 1, cursor);
25456 return 0;
25457 }
25458 cursor += PAGE_SIZE;
25459@@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25460 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
25461 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
25462 "for [mem %#010Lx-%#010Lx]\n",
25463- current->comm, current->pid,
25464+ current->comm, task_pid_nr(current),
25465 cattr_name(flags),
25466 base, (unsigned long long)(base + size-1));
25467 return -EINVAL;
25468@@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25469 flags = lookup_memtype(paddr);
25470 if (want_flags != flags) {
25471 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
25472- current->comm, current->pid,
25473+ current->comm, task_pid_nr(current),
25474 cattr_name(want_flags),
25475 (unsigned long long)paddr,
25476 (unsigned long long)(paddr + size - 1),
25477@@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25478 free_memtype(paddr, paddr + size);
25479 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25480 " for [mem %#010Lx-%#010Lx], got %s\n",
25481- current->comm, current->pid,
25482+ current->comm, task_pid_nr(current),
25483 cattr_name(want_flags),
25484 (unsigned long long)paddr,
25485 (unsigned long long)(paddr + size - 1),
25486diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25487index 9f0614d..92ae64a 100644
25488--- a/arch/x86/mm/pf_in.c
25489+++ b/arch/x86/mm/pf_in.c
25490@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25491 int i;
25492 enum reason_type rv = OTHERS;
25493
25494- p = (unsigned char *)ins_addr;
25495+ p = (unsigned char *)ktla_ktva(ins_addr);
25496 p += skip_prefix(p, &prf);
25497 p += get_opcode(p, &opcode);
25498
25499@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25500 struct prefix_bits prf;
25501 int i;
25502
25503- p = (unsigned char *)ins_addr;
25504+ p = (unsigned char *)ktla_ktva(ins_addr);
25505 p += skip_prefix(p, &prf);
25506 p += get_opcode(p, &opcode);
25507
25508@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25509 struct prefix_bits prf;
25510 int i;
25511
25512- p = (unsigned char *)ins_addr;
25513+ p = (unsigned char *)ktla_ktva(ins_addr);
25514 p += skip_prefix(p, &prf);
25515 p += get_opcode(p, &opcode);
25516
25517@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25518 struct prefix_bits prf;
25519 int i;
25520
25521- p = (unsigned char *)ins_addr;
25522+ p = (unsigned char *)ktla_ktva(ins_addr);
25523 p += skip_prefix(p, &prf);
25524 p += get_opcode(p, &opcode);
25525 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25526@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25527 struct prefix_bits prf;
25528 int i;
25529
25530- p = (unsigned char *)ins_addr;
25531+ p = (unsigned char *)ktla_ktva(ins_addr);
25532 p += skip_prefix(p, &prf);
25533 p += get_opcode(p, &opcode);
25534 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25535diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25536index 8573b83..4f3ed7e 100644
25537--- a/arch/x86/mm/pgtable.c
25538+++ b/arch/x86/mm/pgtable.c
25539@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
25540 list_del(&page->lru);
25541 }
25542
25543-#define UNSHARED_PTRS_PER_PGD \
25544- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25545+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25546+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25547
25548+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
25549+{
25550+ unsigned int count = USER_PGD_PTRS;
25551
25552+ while (count--)
25553+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25554+}
25555+#endif
25556+
25557+#ifdef CONFIG_PAX_PER_CPU_PGD
25558+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
25559+{
25560+ unsigned int count = USER_PGD_PTRS;
25561+
25562+ while (count--) {
25563+ pgd_t pgd;
25564+
25565+#ifdef CONFIG_X86_64
25566+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25567+#else
25568+ pgd = *src++;
25569+#endif
25570+
25571+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25572+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25573+#endif
25574+
25575+ *dst++ = pgd;
25576+ }
25577+
25578+}
25579+#endif
25580+
25581+#ifdef CONFIG_X86_64
25582+#define pxd_t pud_t
25583+#define pyd_t pgd_t
25584+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25585+#define pxd_free(mm, pud) pud_free((mm), (pud))
25586+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25587+#define pyd_offset(mm, address) pgd_offset((mm), (address))
25588+#define PYD_SIZE PGDIR_SIZE
25589+#else
25590+#define pxd_t pmd_t
25591+#define pyd_t pud_t
25592+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25593+#define pxd_free(mm, pud) pmd_free((mm), (pud))
25594+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25595+#define pyd_offset(mm, address) pud_offset((mm), (address))
25596+#define PYD_SIZE PUD_SIZE
25597+#endif
25598+
25599+#ifdef CONFIG_PAX_PER_CPU_PGD
25600+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25601+static inline void pgd_dtor(pgd_t *pgd) {}
25602+#else
25603 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25604 {
25605 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25606@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
25607 pgd_list_del(pgd);
25608 spin_unlock(&pgd_lock);
25609 }
25610+#endif
25611
25612 /*
25613 * List of all pgd's needed for non-PAE so it can invalidate entries
25614@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
25615 * -- wli
25616 */
25617
25618-#ifdef CONFIG_X86_PAE
25619+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25620 /*
25621 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25622 * updating the top-level pagetable entries to guarantee the
25623@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
25624 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25625 * and initialize the kernel pmds here.
25626 */
25627-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25628+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25629
25630 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25631 {
25632@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25633 */
25634 flush_tlb_mm(mm);
25635 }
25636+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25637+#define PREALLOCATED_PXDS USER_PGD_PTRS
25638 #else /* !CONFIG_X86_PAE */
25639
25640 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25641-#define PREALLOCATED_PMDS 0
25642+#define PREALLOCATED_PXDS 0
25643
25644 #endif /* CONFIG_X86_PAE */
25645
25646-static void free_pmds(pmd_t *pmds[])
25647+static void free_pxds(pxd_t *pxds[])
25648 {
25649 int i;
25650
25651- for(i = 0; i < PREALLOCATED_PMDS; i++)
25652- if (pmds[i])
25653- free_page((unsigned long)pmds[i]);
25654+ for(i = 0; i < PREALLOCATED_PXDS; i++)
25655+ if (pxds[i])
25656+ free_page((unsigned long)pxds[i]);
25657 }
25658
25659-static int preallocate_pmds(pmd_t *pmds[])
25660+static int preallocate_pxds(pxd_t *pxds[])
25661 {
25662 int i;
25663 bool failed = false;
25664
25665- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25666- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25667- if (pmd == NULL)
25668+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25669+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25670+ if (pxd == NULL)
25671 failed = true;
25672- pmds[i] = pmd;
25673+ pxds[i] = pxd;
25674 }
25675
25676 if (failed) {
25677- free_pmds(pmds);
25678+ free_pxds(pxds);
25679 return -ENOMEM;
25680 }
25681
25682@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25683 * preallocate which never got a corresponding vma will need to be
25684 * freed manually.
25685 */
25686-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25687+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25688 {
25689 int i;
25690
25691- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25692+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25693 pgd_t pgd = pgdp[i];
25694
25695 if (pgd_val(pgd) != 0) {
25696- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25697+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25698
25699- pgdp[i] = native_make_pgd(0);
25700+ set_pgd(pgdp + i, native_make_pgd(0));
25701
25702- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25703- pmd_free(mm, pmd);
25704+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25705+ pxd_free(mm, pxd);
25706 }
25707 }
25708 }
25709
25710-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25711+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25712 {
25713- pud_t *pud;
25714+ pyd_t *pyd;
25715 unsigned long addr;
25716 int i;
25717
25718- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25719+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25720 return;
25721
25722- pud = pud_offset(pgd, 0);
25723+#ifdef CONFIG_X86_64
25724+ pyd = pyd_offset(mm, 0L);
25725+#else
25726+ pyd = pyd_offset(pgd, 0L);
25727+#endif
25728
25729- for (addr = i = 0; i < PREALLOCATED_PMDS;
25730- i++, pud++, addr += PUD_SIZE) {
25731- pmd_t *pmd = pmds[i];
25732+ for (addr = i = 0; i < PREALLOCATED_PXDS;
25733+ i++, pyd++, addr += PYD_SIZE) {
25734+ pxd_t *pxd = pxds[i];
25735
25736 if (i >= KERNEL_PGD_BOUNDARY)
25737- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25738- sizeof(pmd_t) * PTRS_PER_PMD);
25739+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25740+ sizeof(pxd_t) * PTRS_PER_PMD);
25741
25742- pud_populate(mm, pud, pmd);
25743+ pyd_populate(mm, pyd, pxd);
25744 }
25745 }
25746
25747 pgd_t *pgd_alloc(struct mm_struct *mm)
25748 {
25749 pgd_t *pgd;
25750- pmd_t *pmds[PREALLOCATED_PMDS];
25751+ pxd_t *pxds[PREALLOCATED_PXDS];
25752
25753 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25754
25755@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25756
25757 mm->pgd = pgd;
25758
25759- if (preallocate_pmds(pmds) != 0)
25760+ if (preallocate_pxds(pxds) != 0)
25761 goto out_free_pgd;
25762
25763 if (paravirt_pgd_alloc(mm) != 0)
25764- goto out_free_pmds;
25765+ goto out_free_pxds;
25766
25767 /*
25768 * Make sure that pre-populating the pmds is atomic with
25769@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25770 spin_lock(&pgd_lock);
25771
25772 pgd_ctor(mm, pgd);
25773- pgd_prepopulate_pmd(mm, pgd, pmds);
25774+ pgd_prepopulate_pxd(mm, pgd, pxds);
25775
25776 spin_unlock(&pgd_lock);
25777
25778 return pgd;
25779
25780-out_free_pmds:
25781- free_pmds(pmds);
25782+out_free_pxds:
25783+ free_pxds(pxds);
25784 out_free_pgd:
25785 free_page((unsigned long)pgd);
25786 out:
25787@@ -295,7 +356,7 @@ out:
25788
25789 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25790 {
25791- pgd_mop_up_pmds(mm, pgd);
25792+ pgd_mop_up_pxds(mm, pgd);
25793 pgd_dtor(pgd);
25794 paravirt_pgd_free(mm, pgd);
25795 free_page((unsigned long)pgd);
25796diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25797index a69bcb8..19068ab 100644
25798--- a/arch/x86/mm/pgtable_32.c
25799+++ b/arch/x86/mm/pgtable_32.c
25800@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25801 return;
25802 }
25803 pte = pte_offset_kernel(pmd, vaddr);
25804+
25805+ pax_open_kernel();
25806 if (pte_val(pteval))
25807 set_pte_at(&init_mm, vaddr, pte, pteval);
25808 else
25809 pte_clear(&init_mm, vaddr, pte);
25810+ pax_close_kernel();
25811
25812 /*
25813 * It's enough to flush this one mapping.
25814diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25815index 410531d..0f16030 100644
25816--- a/arch/x86/mm/setup_nx.c
25817+++ b/arch/x86/mm/setup_nx.c
25818@@ -5,8 +5,10 @@
25819 #include <asm/pgtable.h>
25820 #include <asm/proto.h>
25821
25822+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25823 static int disable_nx __cpuinitdata;
25824
25825+#ifndef CONFIG_PAX_PAGEEXEC
25826 /*
25827 * noexec = on|off
25828 *
25829@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25830 return 0;
25831 }
25832 early_param("noexec", noexec_setup);
25833+#endif
25834+
25835+#endif
25836
25837 void __cpuinit x86_configure_nx(void)
25838 {
25839+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25840 if (cpu_has_nx && !disable_nx)
25841 __supported_pte_mask |= _PAGE_NX;
25842 else
25843+#endif
25844 __supported_pte_mask &= ~_PAGE_NX;
25845 }
25846
25847diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25848index 5e57e11..64874249 100644
25849--- a/arch/x86/mm/tlb.c
25850+++ b/arch/x86/mm/tlb.c
25851@@ -66,7 +66,11 @@ void leave_mm(int cpu)
25852 BUG();
25853 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
25854 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
25855+
25856+#ifndef CONFIG_PAX_PER_CPU_PGD
25857 load_cr3(swapper_pg_dir);
25858+#endif
25859+
25860 }
25861 }
25862 EXPORT_SYMBOL_GPL(leave_mm);
25863diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25864index 877b9a1..a8ecf42 100644
25865--- a/arch/x86/net/bpf_jit.S
25866+++ b/arch/x86/net/bpf_jit.S
25867@@ -9,6 +9,7 @@
25868 */
25869 #include <linux/linkage.h>
25870 #include <asm/dwarf2.h>
25871+#include <asm/alternative-asm.h>
25872
25873 /*
25874 * Calling convention :
25875@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
25876 jle bpf_slow_path_word
25877 mov (SKBDATA,%rsi),%eax
25878 bswap %eax /* ntohl() */
25879+ pax_force_retaddr
25880 ret
25881
25882 sk_load_half:
25883@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
25884 jle bpf_slow_path_half
25885 movzwl (SKBDATA,%rsi),%eax
25886 rol $8,%ax # ntohs()
25887+ pax_force_retaddr
25888 ret
25889
25890 sk_load_byte:
25891@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
25892 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25893 jle bpf_slow_path_byte
25894 movzbl (SKBDATA,%rsi),%eax
25895+ pax_force_retaddr
25896 ret
25897
25898 /**
25899@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
25900 movzbl (SKBDATA,%rsi),%ebx
25901 and $15,%bl
25902 shl $2,%bl
25903+ pax_force_retaddr
25904 ret
25905
25906 /* rsi contains offset and can be scratched */
25907@@ -109,6 +114,7 @@ bpf_slow_path_word:
25908 js bpf_error
25909 mov -12(%rbp),%eax
25910 bswap %eax
25911+ pax_force_retaddr
25912 ret
25913
25914 bpf_slow_path_half:
25915@@ -117,12 +123,14 @@ bpf_slow_path_half:
25916 mov -12(%rbp),%ax
25917 rol $8,%ax
25918 movzwl %ax,%eax
25919+ pax_force_retaddr
25920 ret
25921
25922 bpf_slow_path_byte:
25923 bpf_slow_path_common(1)
25924 js bpf_error
25925 movzbl -12(%rbp),%eax
25926+ pax_force_retaddr
25927 ret
25928
25929 bpf_slow_path_byte_msh:
25930@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
25931 and $15,%al
25932 shl $2,%al
25933 xchg %eax,%ebx
25934+ pax_force_retaddr
25935 ret
25936
25937 #define sk_negative_common(SIZE) \
25938@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
25939 sk_negative_common(4)
25940 mov (%rax), %eax
25941 bswap %eax
25942+ pax_force_retaddr
25943 ret
25944
25945 bpf_slow_path_half_neg:
25946@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
25947 mov (%rax),%ax
25948 rol $8,%ax
25949 movzwl %ax,%eax
25950+ pax_force_retaddr
25951 ret
25952
25953 bpf_slow_path_byte_neg:
25954@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
25955 .globl sk_load_byte_negative_offset
25956 sk_negative_common(1)
25957 movzbl (%rax), %eax
25958+ pax_force_retaddr
25959 ret
25960
25961 bpf_slow_path_byte_msh_neg:
25962@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
25963 and $15,%al
25964 shl $2,%al
25965 xchg %eax,%ebx
25966+ pax_force_retaddr
25967 ret
25968
25969 bpf_error:
25970@@ -197,4 +210,5 @@ bpf_error:
25971 xor %eax,%eax
25972 mov -8(%rbp),%rbx
25973 leaveq
25974+ pax_force_retaddr
25975 ret
25976diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
25977index 0597f95..a12c36e 100644
25978--- a/arch/x86/net/bpf_jit_comp.c
25979+++ b/arch/x86/net/bpf_jit_comp.c
25980@@ -120,6 +120,11 @@ static inline void bpf_flush_icache(void *start, void *end)
25981 set_fs(old_fs);
25982 }
25983
25984+struct bpf_jit_work {
25985+ struct work_struct work;
25986+ void *image;
25987+};
25988+
25989 #define CHOOSE_LOAD_FUNC(K, func) \
25990 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
25991
25992@@ -146,6 +151,10 @@ void bpf_jit_compile(struct sk_filter *fp)
25993 if (addrs == NULL)
25994 return;
25995
25996+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
25997+ if (!fp->work)
25998+ goto out;
25999+
26000 /* Before first pass, make a rough estimation of addrs[]
26001 * each bpf instruction is translated to less than 64 bytes
26002 */
26003@@ -589,17 +598,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26004 break;
26005 default:
26006 /* hmm, too complex filter, give up with jit compiler */
26007- goto out;
26008+ goto error;
26009 }
26010 ilen = prog - temp;
26011 if (image) {
26012 if (unlikely(proglen + ilen > oldproglen)) {
26013 pr_err("bpb_jit_compile fatal error\n");
26014- kfree(addrs);
26015- module_free(NULL, image);
26016- return;
26017+ module_free_exec(NULL, image);
26018+ goto error;
26019 }
26020+ pax_open_kernel();
26021 memcpy(image + proglen, temp, ilen);
26022+ pax_close_kernel();
26023 }
26024 proglen += ilen;
26025 addrs[i] = proglen;
26026@@ -620,11 +630,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26027 break;
26028 }
26029 if (proglen == oldproglen) {
26030- image = module_alloc(max_t(unsigned int,
26031- proglen,
26032- sizeof(struct work_struct)));
26033+ image = module_alloc_exec(proglen);
26034 if (!image)
26035- goto out;
26036+ goto error;
26037 }
26038 oldproglen = proglen;
26039 }
26040@@ -640,7 +648,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26041 bpf_flush_icache(image, image + proglen);
26042
26043 fp->bpf_func = (void *)image;
26044- }
26045+ } else
26046+error:
26047+ kfree(fp->work);
26048+
26049 out:
26050 kfree(addrs);
26051 return;
26052@@ -648,18 +659,20 @@ out:
26053
26054 static void jit_free_defer(struct work_struct *arg)
26055 {
26056- module_free(NULL, arg);
26057+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
26058+ kfree(arg);
26059 }
26060
26061 /* run from softirq, we must use a work_struct to call
26062- * module_free() from process context
26063+ * module_free_exec() from process context
26064 */
26065 void bpf_jit_free(struct sk_filter *fp)
26066 {
26067 if (fp->bpf_func != sk_run_filter) {
26068- struct work_struct *work = (struct work_struct *)fp->bpf_func;
26069+ struct work_struct *work = &fp->work->work;
26070
26071 INIT_WORK(work, jit_free_defer);
26072+ fp->work->image = fp->bpf_func;
26073 schedule_work(work);
26074 }
26075 }
26076diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26077index d6aa6e8..266395a 100644
26078--- a/arch/x86/oprofile/backtrace.c
26079+++ b/arch/x86/oprofile/backtrace.c
26080@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26081 struct stack_frame_ia32 *fp;
26082 unsigned long bytes;
26083
26084- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26085+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26086 if (bytes != sizeof(bufhead))
26087 return NULL;
26088
26089- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26090+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26091
26092 oprofile_add_trace(bufhead[0].return_address);
26093
26094@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26095 struct stack_frame bufhead[2];
26096 unsigned long bytes;
26097
26098- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26099+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26100 if (bytes != sizeof(bufhead))
26101 return NULL;
26102
26103@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26104 {
26105 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26106
26107- if (!user_mode_vm(regs)) {
26108+ if (!user_mode(regs)) {
26109 unsigned long stack = kernel_stack_pointer(regs);
26110 if (depth)
26111 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26112diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26113index 140942f..8a5cc55 100644
26114--- a/arch/x86/pci/mrst.c
26115+++ b/arch/x86/pci/mrst.c
26116@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
26117 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
26118 pci_mmcfg_late_init();
26119 pcibios_enable_irq = mrst_pci_irq_enable;
26120- pci_root_ops = pci_mrst_ops;
26121+ pax_open_kernel();
26122+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26123+ pax_close_kernel();
26124 pci_soc_mode = 1;
26125 /* Continue with standard init */
26126 return 1;
26127diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26128index da8fe05..7ee6704 100644
26129--- a/arch/x86/pci/pcbios.c
26130+++ b/arch/x86/pci/pcbios.c
26131@@ -79,50 +79,93 @@ union bios32 {
26132 static struct {
26133 unsigned long address;
26134 unsigned short segment;
26135-} bios32_indirect = { 0, __KERNEL_CS };
26136+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26137
26138 /*
26139 * Returns the entry point for the given service, NULL on error
26140 */
26141
26142-static unsigned long bios32_service(unsigned long service)
26143+static unsigned long __devinit bios32_service(unsigned long service)
26144 {
26145 unsigned char return_code; /* %al */
26146 unsigned long address; /* %ebx */
26147 unsigned long length; /* %ecx */
26148 unsigned long entry; /* %edx */
26149 unsigned long flags;
26150+ struct desc_struct d, *gdt;
26151
26152 local_irq_save(flags);
26153- __asm__("lcall *(%%edi); cld"
26154+
26155+ gdt = get_cpu_gdt_table(smp_processor_id());
26156+
26157+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26158+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26159+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26160+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26161+
26162+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26163 : "=a" (return_code),
26164 "=b" (address),
26165 "=c" (length),
26166 "=d" (entry)
26167 : "0" (service),
26168 "1" (0),
26169- "D" (&bios32_indirect));
26170+ "D" (&bios32_indirect),
26171+ "r"(__PCIBIOS_DS)
26172+ : "memory");
26173+
26174+ pax_open_kernel();
26175+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26176+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26177+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26178+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26179+ pax_close_kernel();
26180+
26181 local_irq_restore(flags);
26182
26183 switch (return_code) {
26184- case 0:
26185- return address + entry;
26186- case 0x80: /* Not present */
26187- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26188- return 0;
26189- default: /* Shouldn't happen */
26190- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26191- service, return_code);
26192+ case 0: {
26193+ int cpu;
26194+ unsigned char flags;
26195+
26196+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26197+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26198+ printk(KERN_WARNING "bios32_service: not valid\n");
26199 return 0;
26200+ }
26201+ address = address + PAGE_OFFSET;
26202+ length += 16UL; /* some BIOSs underreport this... */
26203+ flags = 4;
26204+ if (length >= 64*1024*1024) {
26205+ length >>= PAGE_SHIFT;
26206+ flags |= 8;
26207+ }
26208+
26209+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26210+ gdt = get_cpu_gdt_table(cpu);
26211+ pack_descriptor(&d, address, length, 0x9b, flags);
26212+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26213+ pack_descriptor(&d, address, length, 0x93, flags);
26214+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26215+ }
26216+ return entry;
26217+ }
26218+ case 0x80: /* Not present */
26219+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26220+ return 0;
26221+ default: /* Shouldn't happen */
26222+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26223+ service, return_code);
26224+ return 0;
26225 }
26226 }
26227
26228 static struct {
26229 unsigned long address;
26230 unsigned short segment;
26231-} pci_indirect = { 0, __KERNEL_CS };
26232+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26233
26234-static int pci_bios_present;
26235+static int pci_bios_present __read_only;
26236
26237 static int __devinit check_pcibios(void)
26238 {
26239@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26240 unsigned long flags, pcibios_entry;
26241
26242 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26243- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26244+ pci_indirect.address = pcibios_entry;
26245
26246 local_irq_save(flags);
26247- __asm__(
26248- "lcall *(%%edi); cld\n\t"
26249+ __asm__("movw %w6, %%ds\n\t"
26250+ "lcall *%%ss:(%%edi); cld\n\t"
26251+ "push %%ss\n\t"
26252+ "pop %%ds\n\t"
26253 "jc 1f\n\t"
26254 "xor %%ah, %%ah\n"
26255 "1:"
26256@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26257 "=b" (ebx),
26258 "=c" (ecx)
26259 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26260- "D" (&pci_indirect)
26261+ "D" (&pci_indirect),
26262+ "r" (__PCIBIOS_DS)
26263 : "memory");
26264 local_irq_restore(flags);
26265
26266@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26267
26268 switch (len) {
26269 case 1:
26270- __asm__("lcall *(%%esi); cld\n\t"
26271+ __asm__("movw %w6, %%ds\n\t"
26272+ "lcall *%%ss:(%%esi); cld\n\t"
26273+ "push %%ss\n\t"
26274+ "pop %%ds\n\t"
26275 "jc 1f\n\t"
26276 "xor %%ah, %%ah\n"
26277 "1:"
26278@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26279 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26280 "b" (bx),
26281 "D" ((long)reg),
26282- "S" (&pci_indirect));
26283+ "S" (&pci_indirect),
26284+ "r" (__PCIBIOS_DS));
26285 /*
26286 * Zero-extend the result beyond 8 bits, do not trust the
26287 * BIOS having done it:
26288@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26289 *value &= 0xff;
26290 break;
26291 case 2:
26292- __asm__("lcall *(%%esi); cld\n\t"
26293+ __asm__("movw %w6, %%ds\n\t"
26294+ "lcall *%%ss:(%%esi); cld\n\t"
26295+ "push %%ss\n\t"
26296+ "pop %%ds\n\t"
26297 "jc 1f\n\t"
26298 "xor %%ah, %%ah\n"
26299 "1:"
26300@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26301 : "1" (PCIBIOS_READ_CONFIG_WORD),
26302 "b" (bx),
26303 "D" ((long)reg),
26304- "S" (&pci_indirect));
26305+ "S" (&pci_indirect),
26306+ "r" (__PCIBIOS_DS));
26307 /*
26308 * Zero-extend the result beyond 16 bits, do not trust the
26309 * BIOS having done it:
26310@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26311 *value &= 0xffff;
26312 break;
26313 case 4:
26314- __asm__("lcall *(%%esi); cld\n\t"
26315+ __asm__("movw %w6, %%ds\n\t"
26316+ "lcall *%%ss:(%%esi); cld\n\t"
26317+ "push %%ss\n\t"
26318+ "pop %%ds\n\t"
26319 "jc 1f\n\t"
26320 "xor %%ah, %%ah\n"
26321 "1:"
26322@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26323 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26324 "b" (bx),
26325 "D" ((long)reg),
26326- "S" (&pci_indirect));
26327+ "S" (&pci_indirect),
26328+ "r" (__PCIBIOS_DS));
26329 break;
26330 }
26331
26332@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26333
26334 switch (len) {
26335 case 1:
26336- __asm__("lcall *(%%esi); cld\n\t"
26337+ __asm__("movw %w6, %%ds\n\t"
26338+ "lcall *%%ss:(%%esi); cld\n\t"
26339+ "push %%ss\n\t"
26340+ "pop %%ds\n\t"
26341 "jc 1f\n\t"
26342 "xor %%ah, %%ah\n"
26343 "1:"
26344@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26345 "c" (value),
26346 "b" (bx),
26347 "D" ((long)reg),
26348- "S" (&pci_indirect));
26349+ "S" (&pci_indirect),
26350+ "r" (__PCIBIOS_DS));
26351 break;
26352 case 2:
26353- __asm__("lcall *(%%esi); cld\n\t"
26354+ __asm__("movw %w6, %%ds\n\t"
26355+ "lcall *%%ss:(%%esi); cld\n\t"
26356+ "push %%ss\n\t"
26357+ "pop %%ds\n\t"
26358 "jc 1f\n\t"
26359 "xor %%ah, %%ah\n"
26360 "1:"
26361@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26362 "c" (value),
26363 "b" (bx),
26364 "D" ((long)reg),
26365- "S" (&pci_indirect));
26366+ "S" (&pci_indirect),
26367+ "r" (__PCIBIOS_DS));
26368 break;
26369 case 4:
26370- __asm__("lcall *(%%esi); cld\n\t"
26371+ __asm__("movw %w6, %%ds\n\t"
26372+ "lcall *%%ss:(%%esi); cld\n\t"
26373+ "push %%ss\n\t"
26374+ "pop %%ds\n\t"
26375 "jc 1f\n\t"
26376 "xor %%ah, %%ah\n"
26377 "1:"
26378@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26379 "c" (value),
26380 "b" (bx),
26381 "D" ((long)reg),
26382- "S" (&pci_indirect));
26383+ "S" (&pci_indirect),
26384+ "r" (__PCIBIOS_DS));
26385 break;
26386 }
26387
26388@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26389
26390 DBG("PCI: Fetching IRQ routing table... ");
26391 __asm__("push %%es\n\t"
26392+ "movw %w8, %%ds\n\t"
26393 "push %%ds\n\t"
26394 "pop %%es\n\t"
26395- "lcall *(%%esi); cld\n\t"
26396+ "lcall *%%ss:(%%esi); cld\n\t"
26397 "pop %%es\n\t"
26398+ "push %%ss\n\t"
26399+ "pop %%ds\n"
26400 "jc 1f\n\t"
26401 "xor %%ah, %%ah\n"
26402 "1:"
26403@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26404 "1" (0),
26405 "D" ((long) &opt),
26406 "S" (&pci_indirect),
26407- "m" (opt)
26408+ "m" (opt),
26409+ "r" (__PCIBIOS_DS)
26410 : "memory");
26411 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26412 if (ret & 0xff00)
26413@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26414 {
26415 int ret;
26416
26417- __asm__("lcall *(%%esi); cld\n\t"
26418+ __asm__("movw %w5, %%ds\n\t"
26419+ "lcall *%%ss:(%%esi); cld\n\t"
26420+ "push %%ss\n\t"
26421+ "pop %%ds\n"
26422 "jc 1f\n\t"
26423 "xor %%ah, %%ah\n"
26424 "1:"
26425@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26426 : "0" (PCIBIOS_SET_PCI_HW_INT),
26427 "b" ((dev->bus->number << 8) | dev->devfn),
26428 "c" ((irq << 8) | (pin + 10)),
26429- "S" (&pci_indirect));
26430+ "S" (&pci_indirect),
26431+ "r" (__PCIBIOS_DS));
26432 return !(ret & 0xff00);
26433 }
26434 EXPORT_SYMBOL(pcibios_set_irq_routing);
26435diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26436index 40e4469..1ab536e 100644
26437--- a/arch/x86/platform/efi/efi_32.c
26438+++ b/arch/x86/platform/efi/efi_32.c
26439@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26440 {
26441 struct desc_ptr gdt_descr;
26442
26443+#ifdef CONFIG_PAX_KERNEXEC
26444+ struct desc_struct d;
26445+#endif
26446+
26447 local_irq_save(efi_rt_eflags);
26448
26449 load_cr3(initial_page_table);
26450 __flush_tlb_all();
26451
26452+#ifdef CONFIG_PAX_KERNEXEC
26453+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26454+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26455+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26456+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26457+#endif
26458+
26459 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26460 gdt_descr.size = GDT_SIZE - 1;
26461 load_gdt(&gdt_descr);
26462@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26463 {
26464 struct desc_ptr gdt_descr;
26465
26466+#ifdef CONFIG_PAX_KERNEXEC
26467+ struct desc_struct d;
26468+
26469+ memset(&d, 0, sizeof d);
26470+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26471+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26472+#endif
26473+
26474 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26475 gdt_descr.size = GDT_SIZE - 1;
26476 load_gdt(&gdt_descr);
26477diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26478index fbe66e6..eae5e38 100644
26479--- a/arch/x86/platform/efi/efi_stub_32.S
26480+++ b/arch/x86/platform/efi/efi_stub_32.S
26481@@ -6,7 +6,9 @@
26482 */
26483
26484 #include <linux/linkage.h>
26485+#include <linux/init.h>
26486 #include <asm/page_types.h>
26487+#include <asm/segment.h>
26488
26489 /*
26490 * efi_call_phys(void *, ...) is a function with variable parameters.
26491@@ -20,7 +22,7 @@
26492 * service functions will comply with gcc calling convention, too.
26493 */
26494
26495-.text
26496+__INIT
26497 ENTRY(efi_call_phys)
26498 /*
26499 * 0. The function can only be called in Linux kernel. So CS has been
26500@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
26501 * The mapping of lower virtual memory has been created in prelog and
26502 * epilog.
26503 */
26504- movl $1f, %edx
26505- subl $__PAGE_OFFSET, %edx
26506- jmp *%edx
26507+#ifdef CONFIG_PAX_KERNEXEC
26508+ movl $(__KERNEXEC_EFI_DS), %edx
26509+ mov %edx, %ds
26510+ mov %edx, %es
26511+ mov %edx, %ss
26512+ addl $2f,(1f)
26513+ ljmp *(1f)
26514+
26515+__INITDATA
26516+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
26517+.previous
26518+
26519+2:
26520+ subl $2b,(1b)
26521+#else
26522+ jmp 1f-__PAGE_OFFSET
26523 1:
26524+#endif
26525
26526 /*
26527 * 2. Now on the top of stack is the return
26528@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
26529 * parameter 2, ..., param n. To make things easy, we save the return
26530 * address of efi_call_phys in a global variable.
26531 */
26532- popl %edx
26533- movl %edx, saved_return_addr
26534- /* get the function pointer into ECX*/
26535- popl %ecx
26536- movl %ecx, efi_rt_function_ptr
26537- movl $2f, %edx
26538- subl $__PAGE_OFFSET, %edx
26539- pushl %edx
26540+ popl (saved_return_addr)
26541+ popl (efi_rt_function_ptr)
26542
26543 /*
26544 * 3. Clear PG bit in %CR0.
26545@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
26546 /*
26547 * 5. Call the physical function.
26548 */
26549- jmp *%ecx
26550+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
26551
26552-2:
26553 /*
26554 * 6. After EFI runtime service returns, control will return to
26555 * following instruction. We'd better readjust stack pointer first.
26556@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
26557 movl %cr0, %edx
26558 orl $0x80000000, %edx
26559 movl %edx, %cr0
26560- jmp 1f
26561-1:
26562+
26563 /*
26564 * 8. Now restore the virtual mode from flat mode by
26565 * adding EIP with PAGE_OFFSET.
26566 */
26567- movl $1f, %edx
26568- jmp *%edx
26569+#ifdef CONFIG_PAX_KERNEXEC
26570+ movl $(__KERNEL_DS), %edx
26571+ mov %edx, %ds
26572+ mov %edx, %es
26573+ mov %edx, %ss
26574+ ljmp $(__KERNEL_CS),$1f
26575+#else
26576+ jmp 1f+__PAGE_OFFSET
26577+#endif
26578 1:
26579
26580 /*
26581 * 9. Balance the stack. And because EAX contain the return value,
26582 * we'd better not clobber it.
26583 */
26584- leal efi_rt_function_ptr, %edx
26585- movl (%edx), %ecx
26586- pushl %ecx
26587+ pushl (efi_rt_function_ptr)
26588
26589 /*
26590- * 10. Push the saved return address onto the stack and return.
26591+ * 10. Return to the saved return address.
26592 */
26593- leal saved_return_addr, %edx
26594- movl (%edx), %ecx
26595- pushl %ecx
26596- ret
26597+ jmpl *(saved_return_addr)
26598 ENDPROC(efi_call_phys)
26599 .previous
26600
26601-.data
26602+__INITDATA
26603 saved_return_addr:
26604 .long 0
26605 efi_rt_function_ptr:
26606diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26607index 4c07cca..2c8427d 100644
26608--- a/arch/x86/platform/efi/efi_stub_64.S
26609+++ b/arch/x86/platform/efi/efi_stub_64.S
26610@@ -7,6 +7,7 @@
26611 */
26612
26613 #include <linux/linkage.h>
26614+#include <asm/alternative-asm.h>
26615
26616 #define SAVE_XMM \
26617 mov %rsp, %rax; \
26618@@ -40,6 +41,7 @@ ENTRY(efi_call0)
26619 call *%rdi
26620 addq $32, %rsp
26621 RESTORE_XMM
26622+ pax_force_retaddr 0, 1
26623 ret
26624 ENDPROC(efi_call0)
26625
26626@@ -50,6 +52,7 @@ ENTRY(efi_call1)
26627 call *%rdi
26628 addq $32, %rsp
26629 RESTORE_XMM
26630+ pax_force_retaddr 0, 1
26631 ret
26632 ENDPROC(efi_call1)
26633
26634@@ -60,6 +63,7 @@ ENTRY(efi_call2)
26635 call *%rdi
26636 addq $32, %rsp
26637 RESTORE_XMM
26638+ pax_force_retaddr 0, 1
26639 ret
26640 ENDPROC(efi_call2)
26641
26642@@ -71,6 +75,7 @@ ENTRY(efi_call3)
26643 call *%rdi
26644 addq $32, %rsp
26645 RESTORE_XMM
26646+ pax_force_retaddr 0, 1
26647 ret
26648 ENDPROC(efi_call3)
26649
26650@@ -83,6 +88,7 @@ ENTRY(efi_call4)
26651 call *%rdi
26652 addq $32, %rsp
26653 RESTORE_XMM
26654+ pax_force_retaddr 0, 1
26655 ret
26656 ENDPROC(efi_call4)
26657
26658@@ -96,6 +102,7 @@ ENTRY(efi_call5)
26659 call *%rdi
26660 addq $48, %rsp
26661 RESTORE_XMM
26662+ pax_force_retaddr 0, 1
26663 ret
26664 ENDPROC(efi_call5)
26665
26666@@ -112,5 +119,6 @@ ENTRY(efi_call6)
26667 call *%rdi
26668 addq $48, %rsp
26669 RESTORE_XMM
26670+ pax_force_retaddr 0, 1
26671 ret
26672 ENDPROC(efi_call6)
26673diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26674index fd41a92..9c33628 100644
26675--- a/arch/x86/platform/mrst/mrst.c
26676+++ b/arch/x86/platform/mrst/mrst.c
26677@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26678 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26679 int sfi_mrtc_num;
26680
26681-static void mrst_power_off(void)
26682+static __noreturn void mrst_power_off(void)
26683 {
26684+ BUG();
26685 }
26686
26687-static void mrst_reboot(void)
26688+static __noreturn void mrst_reboot(void)
26689 {
26690 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26691+ BUG();
26692 }
26693
26694 /* parse all the mtimer info to a static mtimer array */
26695diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26696index 218cdb1..fd55c08 100644
26697--- a/arch/x86/power/cpu.c
26698+++ b/arch/x86/power/cpu.c
26699@@ -132,7 +132,7 @@ static void do_fpu_end(void)
26700 static void fix_processor_context(void)
26701 {
26702 int cpu = smp_processor_id();
26703- struct tss_struct *t = &per_cpu(init_tss, cpu);
26704+ struct tss_struct *t = init_tss + cpu;
26705
26706 set_tss_desc(cpu, t); /*
26707 * This just modifies memory; should not be
26708@@ -142,7 +142,9 @@ static void fix_processor_context(void)
26709 */
26710
26711 #ifdef CONFIG_X86_64
26712+ pax_open_kernel();
26713 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26714+ pax_close_kernel();
26715
26716 syscall_init(); /* This sets MSR_*STAR and related */
26717 #endif
26718diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
26719index cbca565..bae7133 100644
26720--- a/arch/x86/realmode/init.c
26721+++ b/arch/x86/realmode/init.c
26722@@ -62,7 +62,13 @@ void __init setup_real_mode(void)
26723 __va(real_mode_header->trampoline_header);
26724
26725 #ifdef CONFIG_X86_32
26726- trampoline_header->start = __pa(startup_32_smp);
26727+ trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
26728+
26729+#ifdef CONFIG_PAX_KERNEXEC
26730+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
26731+#endif
26732+
26733+ trampoline_header->boot_cs = __BOOT_CS;
26734 trampoline_header->gdt_limit = __BOOT_DS + 7;
26735 trampoline_header->gdt_base = __pa(boot_gdt);
26736 #else
26737diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
26738index 5b84a2d..a004393 100644
26739--- a/arch/x86/realmode/rm/Makefile
26740+++ b/arch/x86/realmode/rm/Makefile
26741@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
26742 $(call cc-option, -fno-unit-at-a-time)) \
26743 $(call cc-option, -fno-stack-protector) \
26744 $(call cc-option, -mpreferred-stack-boundary=2)
26745+ifdef CONSTIFY_PLUGIN
26746+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
26747+endif
26748 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
26749 GCOV_PROFILE := n
26750diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
26751index c1b2791..f9e31c7 100644
26752--- a/arch/x86/realmode/rm/trampoline_32.S
26753+++ b/arch/x86/realmode/rm/trampoline_32.S
26754@@ -25,6 +25,12 @@
26755 #include <asm/page_types.h>
26756 #include "realmode.h"
26757
26758+#ifdef CONFIG_PAX_KERNEXEC
26759+#define ta(X) (X)
26760+#else
26761+#define ta(X) (pa_ ## X)
26762+#endif
26763+
26764 .text
26765 .code16
26766
26767@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
26768
26769 cli # We should be safe anyway
26770
26771- movl tr_start, %eax # where we need to go
26772-
26773 movl $0xA5A5A5A5, trampoline_status
26774 # write marker for master knows we're running
26775
26776@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
26777 movw $1, %dx # protected mode (PE) bit
26778 lmsw %dx # into protected mode
26779
26780- ljmpl $__BOOT_CS, $pa_startup_32
26781+ ljmpl *(trampoline_header)
26782
26783 .section ".text32","ax"
26784 .code32
26785@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
26786 .balign 8
26787 GLOBAL(trampoline_header)
26788 tr_start: .space 4
26789- tr_gdt_pad: .space 2
26790+ tr_boot_cs: .space 2
26791 tr_gdt: .space 6
26792 END(trampoline_header)
26793
26794diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
26795index bb360dc..3e5945f 100644
26796--- a/arch/x86/realmode/rm/trampoline_64.S
26797+++ b/arch/x86/realmode/rm/trampoline_64.S
26798@@ -107,7 +107,7 @@ ENTRY(startup_32)
26799 wrmsr
26800
26801 # Enable paging and in turn activate Long Mode
26802- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
26803+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
26804 movl %eax, %cr0
26805
26806 /*
26807diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
26808index 5a1847d..deccb30 100644
26809--- a/arch/x86/tools/relocs.c
26810+++ b/arch/x86/tools/relocs.c
26811@@ -12,10 +12,13 @@
26812 #include <regex.h>
26813 #include <tools/le_byteshift.h>
26814
26815+#include "../../../include/generated/autoconf.h"
26816+
26817 static void die(char *fmt, ...);
26818
26819 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
26820 static Elf32_Ehdr ehdr;
26821+static Elf32_Phdr *phdr;
26822 static unsigned long reloc_count, reloc_idx;
26823 static unsigned long *relocs;
26824 static unsigned long reloc16_count, reloc16_idx;
26825@@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
26826 }
26827 }
26828
26829+static void read_phdrs(FILE *fp)
26830+{
26831+ unsigned int i;
26832+
26833+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
26834+ if (!phdr) {
26835+ die("Unable to allocate %d program headers\n",
26836+ ehdr.e_phnum);
26837+ }
26838+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
26839+ die("Seek to %d failed: %s\n",
26840+ ehdr.e_phoff, strerror(errno));
26841+ }
26842+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
26843+ die("Cannot read ELF program headers: %s\n",
26844+ strerror(errno));
26845+ }
26846+ for(i = 0; i < ehdr.e_phnum; i++) {
26847+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
26848+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
26849+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
26850+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
26851+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
26852+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
26853+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
26854+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
26855+ }
26856+
26857+}
26858+
26859 static void read_shdrs(FILE *fp)
26860 {
26861- int i;
26862+ unsigned int i;
26863 Elf32_Shdr shdr;
26864
26865 secs = calloc(ehdr.e_shnum, sizeof(struct section));
26866@@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
26867
26868 static void read_strtabs(FILE *fp)
26869 {
26870- int i;
26871+ unsigned int i;
26872 for (i = 0; i < ehdr.e_shnum; i++) {
26873 struct section *sec = &secs[i];
26874 if (sec->shdr.sh_type != SHT_STRTAB) {
26875@@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
26876
26877 static void read_symtabs(FILE *fp)
26878 {
26879- int i,j;
26880+ unsigned int i,j;
26881 for (i = 0; i < ehdr.e_shnum; i++) {
26882 struct section *sec = &secs[i];
26883 if (sec->shdr.sh_type != SHT_SYMTAB) {
26884@@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
26885 }
26886
26887
26888-static void read_relocs(FILE *fp)
26889+static void read_relocs(FILE *fp, int use_real_mode)
26890 {
26891- int i,j;
26892+ unsigned int i,j;
26893+ uint32_t base;
26894+
26895 for (i = 0; i < ehdr.e_shnum; i++) {
26896 struct section *sec = &secs[i];
26897 if (sec->shdr.sh_type != SHT_REL) {
26898@@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
26899 die("Cannot read symbol table: %s\n",
26900 strerror(errno));
26901 }
26902+ base = 0;
26903+
26904+#ifdef CONFIG_X86_32
26905+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
26906+ if (phdr[j].p_type != PT_LOAD )
26907+ continue;
26908+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
26909+ continue;
26910+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
26911+ break;
26912+ }
26913+#endif
26914+
26915 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
26916 Elf32_Rel *rel = &sec->reltab[j];
26917- rel->r_offset = elf32_to_cpu(rel->r_offset);
26918+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
26919 rel->r_info = elf32_to_cpu(rel->r_info);
26920 }
26921 }
26922@@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
26923
26924 static void print_absolute_symbols(void)
26925 {
26926- int i;
26927+ unsigned int i;
26928 printf("Absolute symbols\n");
26929 printf(" Num: Value Size Type Bind Visibility Name\n");
26930 for (i = 0; i < ehdr.e_shnum; i++) {
26931 struct section *sec = &secs[i];
26932 char *sym_strtab;
26933- int j;
26934+ unsigned int j;
26935
26936 if (sec->shdr.sh_type != SHT_SYMTAB) {
26937 continue;
26938@@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
26939
26940 static void print_absolute_relocs(void)
26941 {
26942- int i, printed = 0;
26943+ unsigned int i, printed = 0;
26944
26945 for (i = 0; i < ehdr.e_shnum; i++) {
26946 struct section *sec = &secs[i];
26947 struct section *sec_applies, *sec_symtab;
26948 char *sym_strtab;
26949 Elf32_Sym *sh_symtab;
26950- int j;
26951+ unsigned int j;
26952 if (sec->shdr.sh_type != SHT_REL) {
26953 continue;
26954 }
26955@@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
26956 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26957 int use_real_mode)
26958 {
26959- int i;
26960+ unsigned int i;
26961 /* Walk through the relocations */
26962 for (i = 0; i < ehdr.e_shnum; i++) {
26963 char *sym_strtab;
26964 Elf32_Sym *sh_symtab;
26965 struct section *sec_applies, *sec_symtab;
26966- int j;
26967+ unsigned int j;
26968 struct section *sec = &secs[i];
26969
26970 if (sec->shdr.sh_type != SHT_REL) {
26971@@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26972 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
26973 r_type = ELF32_R_TYPE(rel->r_info);
26974
26975+ if (!use_real_mode) {
26976+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
26977+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
26978+ continue;
26979+
26980+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26981+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
26982+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
26983+ continue;
26984+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
26985+ continue;
26986+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
26987+ continue;
26988+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
26989+ continue;
26990+#endif
26991+ }
26992+
26993 shn_abs = sym->st_shndx == SHN_ABS;
26994
26995 switch (r_type) {
26996@@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
26997
26998 static void emit_relocs(int as_text, int use_real_mode)
26999 {
27000- int i;
27001+ unsigned int i;
27002 /* Count how many relocations I have and allocate space for them. */
27003 reloc_count = 0;
27004 walk_relocs(count_reloc, use_real_mode);
27005@@ -808,10 +874,11 @@ int main(int argc, char **argv)
27006 fname, strerror(errno));
27007 }
27008 read_ehdr(fp);
27009+ read_phdrs(fp);
27010 read_shdrs(fp);
27011 read_strtabs(fp);
27012 read_symtabs(fp);
27013- read_relocs(fp);
27014+ read_relocs(fp, use_real_mode);
27015 if (show_absolute_syms) {
27016 print_absolute_symbols();
27017 return 0;
27018diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
27019index fd14be1..e3c79c0 100644
27020--- a/arch/x86/vdso/Makefile
27021+++ b/arch/x86/vdso/Makefile
27022@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
27023 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
27024 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
27025
27026-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
27027+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
27028 GCOV_PROFILE := n
27029
27030 #
27031diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
27032index 66e6d93..587f435 100644
27033--- a/arch/x86/vdso/vdso32-setup.c
27034+++ b/arch/x86/vdso/vdso32-setup.c
27035@@ -25,6 +25,7 @@
27036 #include <asm/tlbflush.h>
27037 #include <asm/vdso.h>
27038 #include <asm/proto.h>
27039+#include <asm/mman.h>
27040
27041 enum {
27042 VDSO_DISABLED = 0,
27043@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
27044 void enable_sep_cpu(void)
27045 {
27046 int cpu = get_cpu();
27047- struct tss_struct *tss = &per_cpu(init_tss, cpu);
27048+ struct tss_struct *tss = init_tss + cpu;
27049
27050 if (!boot_cpu_has(X86_FEATURE_SEP)) {
27051 put_cpu();
27052@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
27053 gate_vma.vm_start = FIXADDR_USER_START;
27054 gate_vma.vm_end = FIXADDR_USER_END;
27055 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
27056- gate_vma.vm_page_prot = __P101;
27057+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
27058
27059 return 0;
27060 }
27061@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27062 if (compat)
27063 addr = VDSO_HIGH_BASE;
27064 else {
27065- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
27066+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
27067 if (IS_ERR_VALUE(addr)) {
27068 ret = addr;
27069 goto up_fail;
27070 }
27071 }
27072
27073- current->mm->context.vdso = (void *)addr;
27074+ current->mm->context.vdso = addr;
27075
27076 if (compat_uses_vma || !compat) {
27077 /*
27078@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27079 }
27080
27081 current_thread_info()->sysenter_return =
27082- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27083+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27084
27085 up_fail:
27086 if (ret)
27087- current->mm->context.vdso = NULL;
27088+ current->mm->context.vdso = 0;
27089
27090 up_write(&mm->mmap_sem);
27091
27092@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
27093
27094 const char *arch_vma_name(struct vm_area_struct *vma)
27095 {
27096- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27097+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27098 return "[vdso]";
27099+
27100+#ifdef CONFIG_PAX_SEGMEXEC
27101+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27102+ return "[vdso]";
27103+#endif
27104+
27105 return NULL;
27106 }
27107
27108@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
27109 * Check to see if the corresponding task was created in compat vdso
27110 * mode.
27111 */
27112- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27113+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27114 return &gate_vma;
27115 return NULL;
27116 }
27117diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27118index 00aaf04..4a26505 100644
27119--- a/arch/x86/vdso/vma.c
27120+++ b/arch/x86/vdso/vma.c
27121@@ -16,8 +16,6 @@
27122 #include <asm/vdso.h>
27123 #include <asm/page.h>
27124
27125-unsigned int __read_mostly vdso_enabled = 1;
27126-
27127 extern char vdso_start[], vdso_end[];
27128 extern unsigned short vdso_sync_cpuid;
27129
27130@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27131 * unaligned here as a result of stack start randomization.
27132 */
27133 addr = PAGE_ALIGN(addr);
27134- addr = align_addr(addr, NULL, ALIGN_VDSO);
27135
27136 return addr;
27137 }
27138@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
27139 unsigned size)
27140 {
27141 struct mm_struct *mm = current->mm;
27142- unsigned long addr;
27143+ unsigned long addr = 0;
27144 int ret;
27145
27146- if (!vdso_enabled)
27147- return 0;
27148-
27149 down_write(&mm->mmap_sem);
27150+
27151+#ifdef CONFIG_PAX_RANDMMAP
27152+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27153+#endif
27154+
27155 addr = vdso_addr(mm->start_stack, size);
27156+ addr = align_addr(addr, NULL, ALIGN_VDSO);
27157 addr = get_unmapped_area(NULL, addr, size, 0, 0);
27158 if (IS_ERR_VALUE(addr)) {
27159 ret = addr;
27160 goto up_fail;
27161 }
27162
27163- current->mm->context.vdso = (void *)addr;
27164+ mm->context.vdso = addr;
27165
27166 ret = install_special_mapping(mm, addr, size,
27167 VM_READ|VM_EXEC|
27168 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
27169 pages);
27170- if (ret) {
27171- current->mm->context.vdso = NULL;
27172- goto up_fail;
27173- }
27174+ if (ret)
27175+ mm->context.vdso = 0;
27176
27177 up_fail:
27178 up_write(&mm->mmap_sem);
27179@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27180 vdsox32_size);
27181 }
27182 #endif
27183-
27184-static __init int vdso_setup(char *s)
27185-{
27186- vdso_enabled = simple_strtoul(s, NULL, 0);
27187- return 0;
27188-}
27189-__setup("vdso=", vdso_setup);
27190diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27191index ff962d4..2bb5e83 100644
27192--- a/arch/x86/xen/enlighten.c
27193+++ b/arch/x86/xen/enlighten.c
27194@@ -97,8 +97,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27195
27196 struct shared_info xen_dummy_shared_info;
27197
27198-void *xen_initial_gdt;
27199-
27200 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
27201 __read_mostly int xen_have_vector_callback;
27202 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
27203@@ -1175,30 +1173,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
27204 #endif
27205 };
27206
27207-static void xen_reboot(int reason)
27208+static __noreturn void xen_reboot(int reason)
27209 {
27210 struct sched_shutdown r = { .reason = reason };
27211
27212- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
27213- BUG();
27214+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
27215+ BUG();
27216 }
27217
27218-static void xen_restart(char *msg)
27219+static __noreturn void xen_restart(char *msg)
27220 {
27221 xen_reboot(SHUTDOWN_reboot);
27222 }
27223
27224-static void xen_emergency_restart(void)
27225+static __noreturn void xen_emergency_restart(void)
27226 {
27227 xen_reboot(SHUTDOWN_reboot);
27228 }
27229
27230-static void xen_machine_halt(void)
27231+static __noreturn void xen_machine_halt(void)
27232 {
27233 xen_reboot(SHUTDOWN_poweroff);
27234 }
27235
27236-static void xen_machine_power_off(void)
27237+static __noreturn void xen_machine_power_off(void)
27238 {
27239 if (pm_power_off)
27240 pm_power_off();
27241@@ -1301,7 +1299,17 @@ asmlinkage void __init xen_start_kernel(void)
27242 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27243
27244 /* Work out if we support NX */
27245- x86_configure_nx();
27246+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27247+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27248+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27249+ unsigned l, h;
27250+
27251+ __supported_pte_mask |= _PAGE_NX;
27252+ rdmsr(MSR_EFER, l, h);
27253+ l |= EFER_NX;
27254+ wrmsr(MSR_EFER, l, h);
27255+ }
27256+#endif
27257
27258 xen_setup_features();
27259
27260@@ -1332,13 +1340,6 @@ asmlinkage void __init xen_start_kernel(void)
27261
27262 machine_ops = xen_machine_ops;
27263
27264- /*
27265- * The only reliable way to retain the initial address of the
27266- * percpu gdt_page is to remember it here, so we can go and
27267- * mark it RW later, when the initial percpu area is freed.
27268- */
27269- xen_initial_gdt = &per_cpu(gdt_page, 0);
27270-
27271 xen_smp_init();
27272
27273 #ifdef CONFIG_ACPI_NUMA
27274diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27275index 3a73785..0d30df2 100644
27276--- a/arch/x86/xen/mmu.c
27277+++ b/arch/x86/xen/mmu.c
27278@@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27279 convert_pfn_mfn(init_level4_pgt);
27280 convert_pfn_mfn(level3_ident_pgt);
27281 convert_pfn_mfn(level3_kernel_pgt);
27282+ convert_pfn_mfn(level3_vmalloc_start_pgt);
27283+ convert_pfn_mfn(level3_vmalloc_end_pgt);
27284+ convert_pfn_mfn(level3_vmemmap_pgt);
27285
27286 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27287 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27288@@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27289 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27290 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27291 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27292+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27293+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27294+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27295 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27296+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27297 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27298 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27299
27300@@ -1940,6 +1947,7 @@ static void __init xen_post_allocator_init(void)
27301 pv_mmu_ops.set_pud = xen_set_pud;
27302 #if PAGETABLE_LEVELS == 4
27303 pv_mmu_ops.set_pgd = xen_set_pgd;
27304+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27305 #endif
27306
27307 /* This will work as long as patching hasn't happened yet
27308@@ -2021,6 +2029,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
27309 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27310 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27311 .set_pgd = xen_set_pgd_hyper,
27312+ .set_pgd_batched = xen_set_pgd_hyper,
27313
27314 .alloc_pud = xen_alloc_pmd_init,
27315 .release_pud = xen_release_pmd_init,
27316diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27317index afb250d..627075f 100644
27318--- a/arch/x86/xen/smp.c
27319+++ b/arch/x86/xen/smp.c
27320@@ -231,11 +231,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27321 {
27322 BUG_ON(smp_processor_id() != 0);
27323 native_smp_prepare_boot_cpu();
27324-
27325- /* We've switched to the "real" per-cpu gdt, so make sure the
27326- old memory can be recycled */
27327- make_lowmem_page_readwrite(xen_initial_gdt);
27328-
27329 xen_filter_cpu_maps();
27330 xen_setup_vcpu_info_placement();
27331 }
27332@@ -302,12 +297,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27333 gdt = get_cpu_gdt_table(cpu);
27334
27335 ctxt->flags = VGCF_IN_KERNEL;
27336- ctxt->user_regs.ds = __USER_DS;
27337- ctxt->user_regs.es = __USER_DS;
27338+ ctxt->user_regs.ds = __KERNEL_DS;
27339+ ctxt->user_regs.es = __KERNEL_DS;
27340 ctxt->user_regs.ss = __KERNEL_DS;
27341 #ifdef CONFIG_X86_32
27342 ctxt->user_regs.fs = __KERNEL_PERCPU;
27343- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27344+ savesegment(gs, ctxt->user_regs.gs);
27345 #else
27346 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27347 #endif
27348@@ -357,13 +352,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
27349 int rc;
27350
27351 per_cpu(current_task, cpu) = idle;
27352+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27353 #ifdef CONFIG_X86_32
27354 irq_ctx_init(cpu);
27355 #else
27356 clear_tsk_thread_flag(idle, TIF_FORK);
27357- per_cpu(kernel_stack, cpu) =
27358- (unsigned long)task_stack_page(idle) -
27359- KERNEL_STACK_OFFSET + THREAD_SIZE;
27360+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27361 #endif
27362 xen_setup_runstate_info(cpu);
27363 xen_setup_timer(cpu);
27364diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27365index f9643fc..602e8af 100644
27366--- a/arch/x86/xen/xen-asm_32.S
27367+++ b/arch/x86/xen/xen-asm_32.S
27368@@ -84,14 +84,14 @@ ENTRY(xen_iret)
27369 ESP_OFFSET=4 # bytes pushed onto stack
27370
27371 /*
27372- * Store vcpu_info pointer for easy access. Do it this way to
27373- * avoid having to reload %fs
27374+ * Store vcpu_info pointer for easy access.
27375 */
27376 #ifdef CONFIG_SMP
27377- GET_THREAD_INFO(%eax)
27378- movl TI_cpu(%eax), %eax
27379- movl __per_cpu_offset(,%eax,4), %eax
27380- mov xen_vcpu(%eax), %eax
27381+ push %fs
27382+ mov $(__KERNEL_PERCPU), %eax
27383+ mov %eax, %fs
27384+ mov PER_CPU_VAR(xen_vcpu), %eax
27385+ pop %fs
27386 #else
27387 movl xen_vcpu, %eax
27388 #endif
27389diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27390index aaa7291..3f77960 100644
27391--- a/arch/x86/xen/xen-head.S
27392+++ b/arch/x86/xen/xen-head.S
27393@@ -19,6 +19,17 @@ ENTRY(startup_xen)
27394 #ifdef CONFIG_X86_32
27395 mov %esi,xen_start_info
27396 mov $init_thread_union+THREAD_SIZE,%esp
27397+#ifdef CONFIG_SMP
27398+ movl $cpu_gdt_table,%edi
27399+ movl $__per_cpu_load,%eax
27400+ movw %ax,__KERNEL_PERCPU + 2(%edi)
27401+ rorl $16,%eax
27402+ movb %al,__KERNEL_PERCPU + 4(%edi)
27403+ movb %ah,__KERNEL_PERCPU + 7(%edi)
27404+ movl $__per_cpu_end - 1,%eax
27405+ subl $__per_cpu_start,%eax
27406+ movw %ax,__KERNEL_PERCPU + 0(%edi)
27407+#endif
27408 #else
27409 mov %rsi,xen_start_info
27410 mov $init_thread_union+THREAD_SIZE,%rsp
27411diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27412index 202d4c1..99b072a 100644
27413--- a/arch/x86/xen/xen-ops.h
27414+++ b/arch/x86/xen/xen-ops.h
27415@@ -10,8 +10,6 @@
27416 extern const char xen_hypervisor_callback[];
27417 extern const char xen_failsafe_callback[];
27418
27419-extern void *xen_initial_gdt;
27420-
27421 struct trap_info;
27422 void xen_copy_trap_info(struct trap_info *traps);
27423
27424diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27425index 525bd3d..ef888b1 100644
27426--- a/arch/xtensa/variants/dc232b/include/variant/core.h
27427+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27428@@ -119,9 +119,9 @@
27429 ----------------------------------------------------------------------*/
27430
27431 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27432-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27433 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27434 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27435+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27436
27437 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27438 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27439diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27440index 2f33760..835e50a 100644
27441--- a/arch/xtensa/variants/fsf/include/variant/core.h
27442+++ b/arch/xtensa/variants/fsf/include/variant/core.h
27443@@ -11,6 +11,7 @@
27444 #ifndef _XTENSA_CORE_H
27445 #define _XTENSA_CORE_H
27446
27447+#include <linux/const.h>
27448
27449 /****************************************************************************
27450 Parameters Useful for Any Code, USER or PRIVILEGED
27451@@ -112,9 +113,9 @@
27452 ----------------------------------------------------------------------*/
27453
27454 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27455-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27456 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27457 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27458+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27459
27460 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27461 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27462diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27463index af00795..2bb8105 100644
27464--- a/arch/xtensa/variants/s6000/include/variant/core.h
27465+++ b/arch/xtensa/variants/s6000/include/variant/core.h
27466@@ -11,6 +11,7 @@
27467 #ifndef _XTENSA_CORE_CONFIGURATION_H
27468 #define _XTENSA_CORE_CONFIGURATION_H
27469
27470+#include <linux/const.h>
27471
27472 /****************************************************************************
27473 Parameters Useful for Any Code, USER or PRIVILEGED
27474@@ -118,9 +119,9 @@
27475 ----------------------------------------------------------------------*/
27476
27477 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27478-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27479 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27480 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27481+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27482
27483 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27484 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27485diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27486index 58916af..9cb880b 100644
27487--- a/block/blk-iopoll.c
27488+++ b/block/blk-iopoll.c
27489@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27490 }
27491 EXPORT_SYMBOL(blk_iopoll_complete);
27492
27493-static void blk_iopoll_softirq(struct softirq_action *h)
27494+static void blk_iopoll_softirq(void)
27495 {
27496 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27497 int rearm = 0, budget = blk_iopoll_budget;
27498diff --git a/block/blk-map.c b/block/blk-map.c
27499index 623e1cd..ca1e109 100644
27500--- a/block/blk-map.c
27501+++ b/block/blk-map.c
27502@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27503 if (!len || !kbuf)
27504 return -EINVAL;
27505
27506- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27507+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
27508 if (do_copy)
27509 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27510 else
27511diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27512index 467c8de..4bddc6d 100644
27513--- a/block/blk-softirq.c
27514+++ b/block/blk-softirq.c
27515@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27516 * Softirq action handler - move entries to local list and loop over them
27517 * while passing them to the queue registered handler.
27518 */
27519-static void blk_done_softirq(struct softirq_action *h)
27520+static void blk_done_softirq(void)
27521 {
27522 struct list_head *cpu_list, local_list;
27523
27524diff --git a/block/bsg.c b/block/bsg.c
27525index ff64ae3..593560c 100644
27526--- a/block/bsg.c
27527+++ b/block/bsg.c
27528@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27529 struct sg_io_v4 *hdr, struct bsg_device *bd,
27530 fmode_t has_write_perm)
27531 {
27532+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27533+ unsigned char *cmdptr;
27534+
27535 if (hdr->request_len > BLK_MAX_CDB) {
27536 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27537 if (!rq->cmd)
27538 return -ENOMEM;
27539- }
27540+ cmdptr = rq->cmd;
27541+ } else
27542+ cmdptr = tmpcmd;
27543
27544- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27545+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27546 hdr->request_len))
27547 return -EFAULT;
27548
27549+ if (cmdptr != rq->cmd)
27550+ memcpy(rq->cmd, cmdptr, hdr->request_len);
27551+
27552 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27553 if (blk_verify_command(rq->cmd, has_write_perm))
27554 return -EPERM;
27555diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27556index 7c668c8..db3521c 100644
27557--- a/block/compat_ioctl.c
27558+++ b/block/compat_ioctl.c
27559@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27560 err |= __get_user(f->spec1, &uf->spec1);
27561 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27562 err |= __get_user(name, &uf->name);
27563- f->name = compat_ptr(name);
27564+ f->name = (void __force_kernel *)compat_ptr(name);
27565 if (err) {
27566 err = -EFAULT;
27567 goto out;
27568diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27569index 6296b40..417c00f 100644
27570--- a/block/partitions/efi.c
27571+++ b/block/partitions/efi.c
27572@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27573 if (!gpt)
27574 return NULL;
27575
27576+ if (!le32_to_cpu(gpt->num_partition_entries))
27577+ return NULL;
27578+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27579+ if (!pte)
27580+ return NULL;
27581+
27582 count = le32_to_cpu(gpt->num_partition_entries) *
27583 le32_to_cpu(gpt->sizeof_partition_entry);
27584- if (!count)
27585- return NULL;
27586- pte = kzalloc(count, GFP_KERNEL);
27587- if (!pte)
27588- return NULL;
27589-
27590 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27591 (u8 *) pte,
27592 count) < count) {
27593diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27594index 9a87daa..fb17486 100644
27595--- a/block/scsi_ioctl.c
27596+++ b/block/scsi_ioctl.c
27597@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27598 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27599 struct sg_io_hdr *hdr, fmode_t mode)
27600 {
27601- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27602+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27603+ unsigned char *cmdptr;
27604+
27605+ if (rq->cmd != rq->__cmd)
27606+ cmdptr = rq->cmd;
27607+ else
27608+ cmdptr = tmpcmd;
27609+
27610+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27611 return -EFAULT;
27612+
27613+ if (cmdptr != rq->cmd)
27614+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27615+
27616 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27617 return -EPERM;
27618
27619@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27620 int err;
27621 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27622 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27623+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27624+ unsigned char *cmdptr;
27625
27626 if (!sic)
27627 return -EINVAL;
27628@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27629 */
27630 err = -EFAULT;
27631 rq->cmd_len = cmdlen;
27632- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27633+
27634+ if (rq->cmd != rq->__cmd)
27635+ cmdptr = rq->cmd;
27636+ else
27637+ cmdptr = tmpcmd;
27638+
27639+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27640 goto error;
27641
27642+ if (rq->cmd != cmdptr)
27643+ memcpy(rq->cmd, cmdptr, cmdlen);
27644+
27645 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27646 goto error;
27647
27648diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27649index 671d4d6..5f24030 100644
27650--- a/crypto/cryptd.c
27651+++ b/crypto/cryptd.c
27652@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27653
27654 struct cryptd_blkcipher_request_ctx {
27655 crypto_completion_t complete;
27656-};
27657+} __no_const;
27658
27659 struct cryptd_hash_ctx {
27660 struct crypto_shash *child;
27661@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27662
27663 struct cryptd_aead_request_ctx {
27664 crypto_completion_t complete;
27665-};
27666+} __no_const;
27667
27668 static void cryptd_queue_worker(struct work_struct *work);
27669
27670diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27671index e6defd8..c26a225 100644
27672--- a/drivers/acpi/apei/cper.c
27673+++ b/drivers/acpi/apei/cper.c
27674@@ -38,12 +38,12 @@
27675 */
27676 u64 cper_next_record_id(void)
27677 {
27678- static atomic64_t seq;
27679+ static atomic64_unchecked_t seq;
27680
27681- if (!atomic64_read(&seq))
27682- atomic64_set(&seq, ((u64)get_seconds()) << 32);
27683+ if (!atomic64_read_unchecked(&seq))
27684+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27685
27686- return atomic64_inc_return(&seq);
27687+ return atomic64_inc_return_unchecked(&seq);
27688 }
27689 EXPORT_SYMBOL_GPL(cper_next_record_id);
27690
27691diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27692index 7586544..636a2f0 100644
27693--- a/drivers/acpi/ec_sys.c
27694+++ b/drivers/acpi/ec_sys.c
27695@@ -12,6 +12,7 @@
27696 #include <linux/acpi.h>
27697 #include <linux/debugfs.h>
27698 #include <linux/module.h>
27699+#include <linux/uaccess.h>
27700 #include "internal.h"
27701
27702 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27703@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27704 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27705 */
27706 unsigned int size = EC_SPACE_SIZE;
27707- u8 *data = (u8 *) buf;
27708+ u8 data;
27709 loff_t init_off = *off;
27710 int err = 0;
27711
27712@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27713 size = count;
27714
27715 while (size) {
27716- err = ec_read(*off, &data[*off - init_off]);
27717+ err = ec_read(*off, &data);
27718 if (err)
27719 return err;
27720+ if (put_user(data, &buf[*off - init_off]))
27721+ return -EFAULT;
27722 *off += 1;
27723 size--;
27724 }
27725@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27726
27727 unsigned int size = count;
27728 loff_t init_off = *off;
27729- u8 *data = (u8 *) buf;
27730 int err = 0;
27731
27732 if (*off >= EC_SPACE_SIZE)
27733@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27734 }
27735
27736 while (size) {
27737- u8 byte_write = data[*off - init_off];
27738+ u8 byte_write;
27739+ if (get_user(byte_write, &buf[*off - init_off]))
27740+ return -EFAULT;
27741 err = ec_write(*off, byte_write);
27742 if (err)
27743 return err;
27744diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27745index 251c7b62..000462d 100644
27746--- a/drivers/acpi/proc.c
27747+++ b/drivers/acpi/proc.c
27748@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27749 size_t count, loff_t * ppos)
27750 {
27751 struct list_head *node, *next;
27752- char strbuf[5];
27753- char str[5] = "";
27754- unsigned int len = count;
27755+ char strbuf[5] = {0};
27756
27757- if (len > 4)
27758- len = 4;
27759- if (len < 0)
27760+ if (count > 4)
27761+ count = 4;
27762+ if (copy_from_user(strbuf, buffer, count))
27763 return -EFAULT;
27764-
27765- if (copy_from_user(strbuf, buffer, len))
27766- return -EFAULT;
27767- strbuf[len] = '\0';
27768- sscanf(strbuf, "%s", str);
27769+ strbuf[count] = '\0';
27770
27771 mutex_lock(&acpi_device_lock);
27772 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27773@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27774 if (!dev->wakeup.flags.valid)
27775 continue;
27776
27777- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27778+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27779 if (device_can_wakeup(&dev->dev)) {
27780 bool enable = !device_may_wakeup(&dev->dev);
27781 device_set_wakeup_enable(&dev->dev, enable);
27782diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27783index bbac51e..4c094f9 100644
27784--- a/drivers/acpi/processor_driver.c
27785+++ b/drivers/acpi/processor_driver.c
27786@@ -556,7 +556,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27787 return 0;
27788 #endif
27789
27790- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27791+ BUG_ON(pr->id >= nr_cpu_ids);
27792
27793 /*
27794 * Buggy BIOS check
27795diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27796index cece3a4..0845256 100644
27797--- a/drivers/ata/libata-core.c
27798+++ b/drivers/ata/libata-core.c
27799@@ -4743,7 +4743,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27800 struct ata_port *ap;
27801 unsigned int tag;
27802
27803- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27804+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27805 ap = qc->ap;
27806
27807 qc->flags = 0;
27808@@ -4759,7 +4759,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27809 struct ata_port *ap;
27810 struct ata_link *link;
27811
27812- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27813+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27814 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27815 ap = qc->ap;
27816 link = qc->dev->link;
27817@@ -5823,6 +5823,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27818 return;
27819
27820 spin_lock(&lock);
27821+ pax_open_kernel();
27822
27823 for (cur = ops->inherits; cur; cur = cur->inherits) {
27824 void **inherit = (void **)cur;
27825@@ -5836,8 +5837,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27826 if (IS_ERR(*pp))
27827 *pp = NULL;
27828
27829- ops->inherits = NULL;
27830+ *(struct ata_port_operations **)&ops->inherits = NULL;
27831
27832+ pax_close_kernel();
27833 spin_unlock(&lock);
27834 }
27835
27836diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27837index ac6a5be..c7176b1 100644
27838--- a/drivers/ata/pata_arasan_cf.c
27839+++ b/drivers/ata/pata_arasan_cf.c
27840@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27841 /* Handle platform specific quirks */
27842 if (pdata->quirk) {
27843 if (pdata->quirk & CF_BROKEN_PIO) {
27844- ap->ops->set_piomode = NULL;
27845+ pax_open_kernel();
27846+ *(void **)&ap->ops->set_piomode = NULL;
27847+ pax_close_kernel();
27848 ap->pio_mask = 0;
27849 }
27850 if (pdata->quirk & CF_BROKEN_MWDMA)
27851diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27852index f9b983a..887b9d8 100644
27853--- a/drivers/atm/adummy.c
27854+++ b/drivers/atm/adummy.c
27855@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27856 vcc->pop(vcc, skb);
27857 else
27858 dev_kfree_skb_any(skb);
27859- atomic_inc(&vcc->stats->tx);
27860+ atomic_inc_unchecked(&vcc->stats->tx);
27861
27862 return 0;
27863 }
27864diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27865index 89b30f3..7964211 100644
27866--- a/drivers/atm/ambassador.c
27867+++ b/drivers/atm/ambassador.c
27868@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27869 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27870
27871 // VC layer stats
27872- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27873+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27874
27875 // free the descriptor
27876 kfree (tx_descr);
27877@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27878 dump_skb ("<<<", vc, skb);
27879
27880 // VC layer stats
27881- atomic_inc(&atm_vcc->stats->rx);
27882+ atomic_inc_unchecked(&atm_vcc->stats->rx);
27883 __net_timestamp(skb);
27884 // end of our responsibility
27885 atm_vcc->push (atm_vcc, skb);
27886@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27887 } else {
27888 PRINTK (KERN_INFO, "dropped over-size frame");
27889 // should we count this?
27890- atomic_inc(&atm_vcc->stats->rx_drop);
27891+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27892 }
27893
27894 } else {
27895@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27896 }
27897
27898 if (check_area (skb->data, skb->len)) {
27899- atomic_inc(&atm_vcc->stats->tx_err);
27900+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27901 return -ENOMEM; // ?
27902 }
27903
27904diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27905index b22d71c..d6e1049 100644
27906--- a/drivers/atm/atmtcp.c
27907+++ b/drivers/atm/atmtcp.c
27908@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27909 if (vcc->pop) vcc->pop(vcc,skb);
27910 else dev_kfree_skb(skb);
27911 if (dev_data) return 0;
27912- atomic_inc(&vcc->stats->tx_err);
27913+ atomic_inc_unchecked(&vcc->stats->tx_err);
27914 return -ENOLINK;
27915 }
27916 size = skb->len+sizeof(struct atmtcp_hdr);
27917@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27918 if (!new_skb) {
27919 if (vcc->pop) vcc->pop(vcc,skb);
27920 else dev_kfree_skb(skb);
27921- atomic_inc(&vcc->stats->tx_err);
27922+ atomic_inc_unchecked(&vcc->stats->tx_err);
27923 return -ENOBUFS;
27924 }
27925 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27926@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27927 if (vcc->pop) vcc->pop(vcc,skb);
27928 else dev_kfree_skb(skb);
27929 out_vcc->push(out_vcc,new_skb);
27930- atomic_inc(&vcc->stats->tx);
27931- atomic_inc(&out_vcc->stats->rx);
27932+ atomic_inc_unchecked(&vcc->stats->tx);
27933+ atomic_inc_unchecked(&out_vcc->stats->rx);
27934 return 0;
27935 }
27936
27937@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27938 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27939 read_unlock(&vcc_sklist_lock);
27940 if (!out_vcc) {
27941- atomic_inc(&vcc->stats->tx_err);
27942+ atomic_inc_unchecked(&vcc->stats->tx_err);
27943 goto done;
27944 }
27945 skb_pull(skb,sizeof(struct atmtcp_hdr));
27946@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27947 __net_timestamp(new_skb);
27948 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27949 out_vcc->push(out_vcc,new_skb);
27950- atomic_inc(&vcc->stats->tx);
27951- atomic_inc(&out_vcc->stats->rx);
27952+ atomic_inc_unchecked(&vcc->stats->tx);
27953+ atomic_inc_unchecked(&out_vcc->stats->rx);
27954 done:
27955 if (vcc->pop) vcc->pop(vcc,skb);
27956 else dev_kfree_skb(skb);
27957diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
27958index 2059ee4..faf51c7 100644
27959--- a/drivers/atm/eni.c
27960+++ b/drivers/atm/eni.c
27961@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
27962 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27963 vcc->dev->number);
27964 length = 0;
27965- atomic_inc(&vcc->stats->rx_err);
27966+ atomic_inc_unchecked(&vcc->stats->rx_err);
27967 }
27968 else {
27969 length = ATM_CELL_SIZE-1; /* no HEC */
27970@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27971 size);
27972 }
27973 eff = length = 0;
27974- atomic_inc(&vcc->stats->rx_err);
27975+ atomic_inc_unchecked(&vcc->stats->rx_err);
27976 }
27977 else {
27978 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
27979@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27980 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27981 vcc->dev->number,vcc->vci,length,size << 2,descr);
27982 length = eff = 0;
27983- atomic_inc(&vcc->stats->rx_err);
27984+ atomic_inc_unchecked(&vcc->stats->rx_err);
27985 }
27986 }
27987 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
27988@@ -767,7 +767,7 @@ rx_dequeued++;
27989 vcc->push(vcc,skb);
27990 pushed++;
27991 }
27992- atomic_inc(&vcc->stats->rx);
27993+ atomic_inc_unchecked(&vcc->stats->rx);
27994 }
27995 wake_up(&eni_dev->rx_wait);
27996 }
27997@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
27998 PCI_DMA_TODEVICE);
27999 if (vcc->pop) vcc->pop(vcc,skb);
28000 else dev_kfree_skb_irq(skb);
28001- atomic_inc(&vcc->stats->tx);
28002+ atomic_inc_unchecked(&vcc->stats->tx);
28003 wake_up(&eni_dev->tx_wait);
28004 dma_complete++;
28005 }
28006@@ -1567,7 +1567,7 @@ tx_complete++;
28007 /*--------------------------------- entries ---------------------------------*/
28008
28009
28010-static const char *media_name[] __devinitdata = {
28011+static const char *media_name[] __devinitconst = {
28012 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
28013 "UTP", "05?", "06?", "07?", /* 4- 7 */
28014 "TAXI","09?", "10?", "11?", /* 8-11 */
28015diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
28016index 86fed1b..6dc4721 100644
28017--- a/drivers/atm/firestream.c
28018+++ b/drivers/atm/firestream.c
28019@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
28020 }
28021 }
28022
28023- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28024+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28025
28026 fs_dprintk (FS_DEBUG_TXMEM, "i");
28027 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
28028@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
28029 #endif
28030 skb_put (skb, qe->p1 & 0xffff);
28031 ATM_SKB(skb)->vcc = atm_vcc;
28032- atomic_inc(&atm_vcc->stats->rx);
28033+ atomic_inc_unchecked(&atm_vcc->stats->rx);
28034 __net_timestamp(skb);
28035 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
28036 atm_vcc->push (atm_vcc, skb);
28037@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
28038 kfree (pe);
28039 }
28040 if (atm_vcc)
28041- atomic_inc(&atm_vcc->stats->rx_drop);
28042+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
28043 break;
28044 case 0x1f: /* Reassembly abort: no buffers. */
28045 /* Silently increment error counter. */
28046 if (atm_vcc)
28047- atomic_inc(&atm_vcc->stats->rx_drop);
28048+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
28049 break;
28050 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
28051 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
28052diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
28053index 361f5ae..7fc552d 100644
28054--- a/drivers/atm/fore200e.c
28055+++ b/drivers/atm/fore200e.c
28056@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
28057 #endif
28058 /* check error condition */
28059 if (*entry->status & STATUS_ERROR)
28060- atomic_inc(&vcc->stats->tx_err);
28061+ atomic_inc_unchecked(&vcc->stats->tx_err);
28062 else
28063- atomic_inc(&vcc->stats->tx);
28064+ atomic_inc_unchecked(&vcc->stats->tx);
28065 }
28066 }
28067
28068@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
28069 if (skb == NULL) {
28070 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
28071
28072- atomic_inc(&vcc->stats->rx_drop);
28073+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28074 return -ENOMEM;
28075 }
28076
28077@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
28078
28079 dev_kfree_skb_any(skb);
28080
28081- atomic_inc(&vcc->stats->rx_drop);
28082+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28083 return -ENOMEM;
28084 }
28085
28086 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
28087
28088 vcc->push(vcc, skb);
28089- atomic_inc(&vcc->stats->rx);
28090+ atomic_inc_unchecked(&vcc->stats->rx);
28091
28092 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
28093
28094@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
28095 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
28096 fore200e->atm_dev->number,
28097 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
28098- atomic_inc(&vcc->stats->rx_err);
28099+ atomic_inc_unchecked(&vcc->stats->rx_err);
28100 }
28101 }
28102
28103@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
28104 goto retry_here;
28105 }
28106
28107- atomic_inc(&vcc->stats->tx_err);
28108+ atomic_inc_unchecked(&vcc->stats->tx_err);
28109
28110 fore200e->tx_sat++;
28111 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
28112diff --git a/drivers/atm/he.c b/drivers/atm/he.c
28113index b182c2f..1c6fa8a 100644
28114--- a/drivers/atm/he.c
28115+++ b/drivers/atm/he.c
28116@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28117
28118 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
28119 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
28120- atomic_inc(&vcc->stats->rx_drop);
28121+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28122 goto return_host_buffers;
28123 }
28124
28125@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28126 RBRQ_LEN_ERR(he_dev->rbrq_head)
28127 ? "LEN_ERR" : "",
28128 vcc->vpi, vcc->vci);
28129- atomic_inc(&vcc->stats->rx_err);
28130+ atomic_inc_unchecked(&vcc->stats->rx_err);
28131 goto return_host_buffers;
28132 }
28133
28134@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28135 vcc->push(vcc, skb);
28136 spin_lock(&he_dev->global_lock);
28137
28138- atomic_inc(&vcc->stats->rx);
28139+ atomic_inc_unchecked(&vcc->stats->rx);
28140
28141 return_host_buffers:
28142 ++pdus_assembled;
28143@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
28144 tpd->vcc->pop(tpd->vcc, tpd->skb);
28145 else
28146 dev_kfree_skb_any(tpd->skb);
28147- atomic_inc(&tpd->vcc->stats->tx_err);
28148+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
28149 }
28150 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
28151 return;
28152@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28153 vcc->pop(vcc, skb);
28154 else
28155 dev_kfree_skb_any(skb);
28156- atomic_inc(&vcc->stats->tx_err);
28157+ atomic_inc_unchecked(&vcc->stats->tx_err);
28158 return -EINVAL;
28159 }
28160
28161@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28162 vcc->pop(vcc, skb);
28163 else
28164 dev_kfree_skb_any(skb);
28165- atomic_inc(&vcc->stats->tx_err);
28166+ atomic_inc_unchecked(&vcc->stats->tx_err);
28167 return -EINVAL;
28168 }
28169 #endif
28170@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28171 vcc->pop(vcc, skb);
28172 else
28173 dev_kfree_skb_any(skb);
28174- atomic_inc(&vcc->stats->tx_err);
28175+ atomic_inc_unchecked(&vcc->stats->tx_err);
28176 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28177 return -ENOMEM;
28178 }
28179@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28180 vcc->pop(vcc, skb);
28181 else
28182 dev_kfree_skb_any(skb);
28183- atomic_inc(&vcc->stats->tx_err);
28184+ atomic_inc_unchecked(&vcc->stats->tx_err);
28185 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28186 return -ENOMEM;
28187 }
28188@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28189 __enqueue_tpd(he_dev, tpd, cid);
28190 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28191
28192- atomic_inc(&vcc->stats->tx);
28193+ atomic_inc_unchecked(&vcc->stats->tx);
28194
28195 return 0;
28196 }
28197diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
28198index 7d01c2a..4e3ac01 100644
28199--- a/drivers/atm/horizon.c
28200+++ b/drivers/atm/horizon.c
28201@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
28202 {
28203 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28204 // VC layer stats
28205- atomic_inc(&vcc->stats->rx);
28206+ atomic_inc_unchecked(&vcc->stats->rx);
28207 __net_timestamp(skb);
28208 // end of our responsibility
28209 vcc->push (vcc, skb);
28210@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
28211 dev->tx_iovec = NULL;
28212
28213 // VC layer stats
28214- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28215+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28216
28217 // free the skb
28218 hrz_kfree_skb (skb);
28219diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
28220index 8974bd2..b856f85 100644
28221--- a/drivers/atm/idt77252.c
28222+++ b/drivers/atm/idt77252.c
28223@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
28224 else
28225 dev_kfree_skb(skb);
28226
28227- atomic_inc(&vcc->stats->tx);
28228+ atomic_inc_unchecked(&vcc->stats->tx);
28229 }
28230
28231 atomic_dec(&scq->used);
28232@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28233 if ((sb = dev_alloc_skb(64)) == NULL) {
28234 printk("%s: Can't allocate buffers for aal0.\n",
28235 card->name);
28236- atomic_add(i, &vcc->stats->rx_drop);
28237+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28238 break;
28239 }
28240 if (!atm_charge(vcc, sb->truesize)) {
28241 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28242 card->name);
28243- atomic_add(i - 1, &vcc->stats->rx_drop);
28244+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28245 dev_kfree_skb(sb);
28246 break;
28247 }
28248@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28249 ATM_SKB(sb)->vcc = vcc;
28250 __net_timestamp(sb);
28251 vcc->push(vcc, sb);
28252- atomic_inc(&vcc->stats->rx);
28253+ atomic_inc_unchecked(&vcc->stats->rx);
28254
28255 cell += ATM_CELL_PAYLOAD;
28256 }
28257@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28258 "(CDC: %08x)\n",
28259 card->name, len, rpp->len, readl(SAR_REG_CDC));
28260 recycle_rx_pool_skb(card, rpp);
28261- atomic_inc(&vcc->stats->rx_err);
28262+ atomic_inc_unchecked(&vcc->stats->rx_err);
28263 return;
28264 }
28265 if (stat & SAR_RSQE_CRC) {
28266 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28267 recycle_rx_pool_skb(card, rpp);
28268- atomic_inc(&vcc->stats->rx_err);
28269+ atomic_inc_unchecked(&vcc->stats->rx_err);
28270 return;
28271 }
28272 if (skb_queue_len(&rpp->queue) > 1) {
28273@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28274 RXPRINTK("%s: Can't alloc RX skb.\n",
28275 card->name);
28276 recycle_rx_pool_skb(card, rpp);
28277- atomic_inc(&vcc->stats->rx_err);
28278+ atomic_inc_unchecked(&vcc->stats->rx_err);
28279 return;
28280 }
28281 if (!atm_charge(vcc, skb->truesize)) {
28282@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28283 __net_timestamp(skb);
28284
28285 vcc->push(vcc, skb);
28286- atomic_inc(&vcc->stats->rx);
28287+ atomic_inc_unchecked(&vcc->stats->rx);
28288
28289 return;
28290 }
28291@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28292 __net_timestamp(skb);
28293
28294 vcc->push(vcc, skb);
28295- atomic_inc(&vcc->stats->rx);
28296+ atomic_inc_unchecked(&vcc->stats->rx);
28297
28298 if (skb->truesize > SAR_FB_SIZE_3)
28299 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
28300@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
28301 if (vcc->qos.aal != ATM_AAL0) {
28302 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28303 card->name, vpi, vci);
28304- atomic_inc(&vcc->stats->rx_drop);
28305+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28306 goto drop;
28307 }
28308
28309 if ((sb = dev_alloc_skb(64)) == NULL) {
28310 printk("%s: Can't allocate buffers for AAL0.\n",
28311 card->name);
28312- atomic_inc(&vcc->stats->rx_err);
28313+ atomic_inc_unchecked(&vcc->stats->rx_err);
28314 goto drop;
28315 }
28316
28317@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
28318 ATM_SKB(sb)->vcc = vcc;
28319 __net_timestamp(sb);
28320 vcc->push(vcc, sb);
28321- atomic_inc(&vcc->stats->rx);
28322+ atomic_inc_unchecked(&vcc->stats->rx);
28323
28324 drop:
28325 skb_pull(queue, 64);
28326@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28327
28328 if (vc == NULL) {
28329 printk("%s: NULL connection in send().\n", card->name);
28330- atomic_inc(&vcc->stats->tx_err);
28331+ atomic_inc_unchecked(&vcc->stats->tx_err);
28332 dev_kfree_skb(skb);
28333 return -EINVAL;
28334 }
28335 if (!test_bit(VCF_TX, &vc->flags)) {
28336 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
28337- atomic_inc(&vcc->stats->tx_err);
28338+ atomic_inc_unchecked(&vcc->stats->tx_err);
28339 dev_kfree_skb(skb);
28340 return -EINVAL;
28341 }
28342@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28343 break;
28344 default:
28345 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
28346- atomic_inc(&vcc->stats->tx_err);
28347+ atomic_inc_unchecked(&vcc->stats->tx_err);
28348 dev_kfree_skb(skb);
28349 return -EINVAL;
28350 }
28351
28352 if (skb_shinfo(skb)->nr_frags != 0) {
28353 printk("%s: No scatter-gather yet.\n", card->name);
28354- atomic_inc(&vcc->stats->tx_err);
28355+ atomic_inc_unchecked(&vcc->stats->tx_err);
28356 dev_kfree_skb(skb);
28357 return -EINVAL;
28358 }
28359@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28360
28361 err = queue_skb(card, vc, skb, oam);
28362 if (err) {
28363- atomic_inc(&vcc->stats->tx_err);
28364+ atomic_inc_unchecked(&vcc->stats->tx_err);
28365 dev_kfree_skb(skb);
28366 return err;
28367 }
28368@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
28369 skb = dev_alloc_skb(64);
28370 if (!skb) {
28371 printk("%s: Out of memory in send_oam().\n", card->name);
28372- atomic_inc(&vcc->stats->tx_err);
28373+ atomic_inc_unchecked(&vcc->stats->tx_err);
28374 return -ENOMEM;
28375 }
28376 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
28377diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
28378index d438601..8b98495 100644
28379--- a/drivers/atm/iphase.c
28380+++ b/drivers/atm/iphase.c
28381@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
28382 status = (u_short) (buf_desc_ptr->desc_mode);
28383 if (status & (RX_CER | RX_PTE | RX_OFL))
28384 {
28385- atomic_inc(&vcc->stats->rx_err);
28386+ atomic_inc_unchecked(&vcc->stats->rx_err);
28387 IF_ERR(printk("IA: bad packet, dropping it");)
28388 if (status & RX_CER) {
28389 IF_ERR(printk(" cause: packet CRC error\n");)
28390@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
28391 len = dma_addr - buf_addr;
28392 if (len > iadev->rx_buf_sz) {
28393 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28394- atomic_inc(&vcc->stats->rx_err);
28395+ atomic_inc_unchecked(&vcc->stats->rx_err);
28396 goto out_free_desc;
28397 }
28398
28399@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28400 ia_vcc = INPH_IA_VCC(vcc);
28401 if (ia_vcc == NULL)
28402 {
28403- atomic_inc(&vcc->stats->rx_err);
28404+ atomic_inc_unchecked(&vcc->stats->rx_err);
28405 atm_return(vcc, skb->truesize);
28406 dev_kfree_skb_any(skb);
28407 goto INCR_DLE;
28408@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28409 if ((length > iadev->rx_buf_sz) || (length >
28410 (skb->len - sizeof(struct cpcs_trailer))))
28411 {
28412- atomic_inc(&vcc->stats->rx_err);
28413+ atomic_inc_unchecked(&vcc->stats->rx_err);
28414 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28415 length, skb->len);)
28416 atm_return(vcc, skb->truesize);
28417@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28418
28419 IF_RX(printk("rx_dle_intr: skb push");)
28420 vcc->push(vcc,skb);
28421- atomic_inc(&vcc->stats->rx);
28422+ atomic_inc_unchecked(&vcc->stats->rx);
28423 iadev->rx_pkt_cnt++;
28424 }
28425 INCR_DLE:
28426@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
28427 {
28428 struct k_sonet_stats *stats;
28429 stats = &PRIV(_ia_dev[board])->sonet_stats;
28430- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28431- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28432- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28433- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28434- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28435- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28436- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28437- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28438- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28439+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28440+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28441+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28442+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28443+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28444+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28445+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28446+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28447+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28448 }
28449 ia_cmds.status = 0;
28450 break;
28451@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28452 if ((desc == 0) || (desc > iadev->num_tx_desc))
28453 {
28454 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28455- atomic_inc(&vcc->stats->tx);
28456+ atomic_inc_unchecked(&vcc->stats->tx);
28457 if (vcc->pop)
28458 vcc->pop(vcc, skb);
28459 else
28460@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28461 ATM_DESC(skb) = vcc->vci;
28462 skb_queue_tail(&iadev->tx_dma_q, skb);
28463
28464- atomic_inc(&vcc->stats->tx);
28465+ atomic_inc_unchecked(&vcc->stats->tx);
28466 iadev->tx_pkt_cnt++;
28467 /* Increment transaction counter */
28468 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28469
28470 #if 0
28471 /* add flow control logic */
28472- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28473+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28474 if (iavcc->vc_desc_cnt > 10) {
28475 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28476 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
28477diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
28478index 68c7588..7036683 100644
28479--- a/drivers/atm/lanai.c
28480+++ b/drivers/atm/lanai.c
28481@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
28482 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28483 lanai_endtx(lanai, lvcc);
28484 lanai_free_skb(lvcc->tx.atmvcc, skb);
28485- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28486+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28487 }
28488
28489 /* Try to fill the buffer - don't call unless there is backlog */
28490@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28491 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28492 __net_timestamp(skb);
28493 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28494- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28495+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28496 out:
28497 lvcc->rx.buf.ptr = end;
28498 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28499@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28500 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28501 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28502 lanai->stats.service_rxnotaal5++;
28503- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28504+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28505 return 0;
28506 }
28507 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28508@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28509 int bytes;
28510 read_unlock(&vcc_sklist_lock);
28511 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28512- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28513+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28514 lvcc->stats.x.aal5.service_trash++;
28515 bytes = (SERVICE_GET_END(s) * 16) -
28516 (((unsigned long) lvcc->rx.buf.ptr) -
28517@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28518 }
28519 if (s & SERVICE_STREAM) {
28520 read_unlock(&vcc_sklist_lock);
28521- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28522+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28523 lvcc->stats.x.aal5.service_stream++;
28524 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28525 "PDU on VCI %d!\n", lanai->number, vci);
28526@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28527 return 0;
28528 }
28529 DPRINTK("got rx crc error on vci %d\n", vci);
28530- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28531+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28532 lvcc->stats.x.aal5.service_rxcrc++;
28533 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28534 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28535diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28536index 1c70c45..300718d 100644
28537--- a/drivers/atm/nicstar.c
28538+++ b/drivers/atm/nicstar.c
28539@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28540 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28541 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28542 card->index);
28543- atomic_inc(&vcc->stats->tx_err);
28544+ atomic_inc_unchecked(&vcc->stats->tx_err);
28545 dev_kfree_skb_any(skb);
28546 return -EINVAL;
28547 }
28548@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28549 if (!vc->tx) {
28550 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28551 card->index);
28552- atomic_inc(&vcc->stats->tx_err);
28553+ atomic_inc_unchecked(&vcc->stats->tx_err);
28554 dev_kfree_skb_any(skb);
28555 return -EINVAL;
28556 }
28557@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28558 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28559 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28560 card->index);
28561- atomic_inc(&vcc->stats->tx_err);
28562+ atomic_inc_unchecked(&vcc->stats->tx_err);
28563 dev_kfree_skb_any(skb);
28564 return -EINVAL;
28565 }
28566
28567 if (skb_shinfo(skb)->nr_frags != 0) {
28568 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28569- atomic_inc(&vcc->stats->tx_err);
28570+ atomic_inc_unchecked(&vcc->stats->tx_err);
28571 dev_kfree_skb_any(skb);
28572 return -EINVAL;
28573 }
28574@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28575 }
28576
28577 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28578- atomic_inc(&vcc->stats->tx_err);
28579+ atomic_inc_unchecked(&vcc->stats->tx_err);
28580 dev_kfree_skb_any(skb);
28581 return -EIO;
28582 }
28583- atomic_inc(&vcc->stats->tx);
28584+ atomic_inc_unchecked(&vcc->stats->tx);
28585
28586 return 0;
28587 }
28588@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28589 printk
28590 ("nicstar%d: Can't allocate buffers for aal0.\n",
28591 card->index);
28592- atomic_add(i, &vcc->stats->rx_drop);
28593+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28594 break;
28595 }
28596 if (!atm_charge(vcc, sb->truesize)) {
28597 RXPRINTK
28598 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28599 card->index);
28600- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28601+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28602 dev_kfree_skb_any(sb);
28603 break;
28604 }
28605@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28606 ATM_SKB(sb)->vcc = vcc;
28607 __net_timestamp(sb);
28608 vcc->push(vcc, sb);
28609- atomic_inc(&vcc->stats->rx);
28610+ atomic_inc_unchecked(&vcc->stats->rx);
28611 cell += ATM_CELL_PAYLOAD;
28612 }
28613
28614@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28615 if (iovb == NULL) {
28616 printk("nicstar%d: Out of iovec buffers.\n",
28617 card->index);
28618- atomic_inc(&vcc->stats->rx_drop);
28619+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28620 recycle_rx_buf(card, skb);
28621 return;
28622 }
28623@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28624 small or large buffer itself. */
28625 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28626 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28627- atomic_inc(&vcc->stats->rx_err);
28628+ atomic_inc_unchecked(&vcc->stats->rx_err);
28629 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28630 NS_MAX_IOVECS);
28631 NS_PRV_IOVCNT(iovb) = 0;
28632@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28633 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28634 card->index);
28635 which_list(card, skb);
28636- atomic_inc(&vcc->stats->rx_err);
28637+ atomic_inc_unchecked(&vcc->stats->rx_err);
28638 recycle_rx_buf(card, skb);
28639 vc->rx_iov = NULL;
28640 recycle_iov_buf(card, iovb);
28641@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28642 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28643 card->index);
28644 which_list(card, skb);
28645- atomic_inc(&vcc->stats->rx_err);
28646+ atomic_inc_unchecked(&vcc->stats->rx_err);
28647 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28648 NS_PRV_IOVCNT(iovb));
28649 vc->rx_iov = NULL;
28650@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28651 printk(" - PDU size mismatch.\n");
28652 else
28653 printk(".\n");
28654- atomic_inc(&vcc->stats->rx_err);
28655+ atomic_inc_unchecked(&vcc->stats->rx_err);
28656 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28657 NS_PRV_IOVCNT(iovb));
28658 vc->rx_iov = NULL;
28659@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28660 /* skb points to a small buffer */
28661 if (!atm_charge(vcc, skb->truesize)) {
28662 push_rxbufs(card, skb);
28663- atomic_inc(&vcc->stats->rx_drop);
28664+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28665 } else {
28666 skb_put(skb, len);
28667 dequeue_sm_buf(card, skb);
28668@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28669 ATM_SKB(skb)->vcc = vcc;
28670 __net_timestamp(skb);
28671 vcc->push(vcc, skb);
28672- atomic_inc(&vcc->stats->rx);
28673+ atomic_inc_unchecked(&vcc->stats->rx);
28674 }
28675 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28676 struct sk_buff *sb;
28677@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28678 if (len <= NS_SMBUFSIZE) {
28679 if (!atm_charge(vcc, sb->truesize)) {
28680 push_rxbufs(card, sb);
28681- atomic_inc(&vcc->stats->rx_drop);
28682+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28683 } else {
28684 skb_put(sb, len);
28685 dequeue_sm_buf(card, sb);
28686@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28687 ATM_SKB(sb)->vcc = vcc;
28688 __net_timestamp(sb);
28689 vcc->push(vcc, sb);
28690- atomic_inc(&vcc->stats->rx);
28691+ atomic_inc_unchecked(&vcc->stats->rx);
28692 }
28693
28694 push_rxbufs(card, skb);
28695@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28696
28697 if (!atm_charge(vcc, skb->truesize)) {
28698 push_rxbufs(card, skb);
28699- atomic_inc(&vcc->stats->rx_drop);
28700+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28701 } else {
28702 dequeue_lg_buf(card, skb);
28703 #ifdef NS_USE_DESTRUCTORS
28704@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28705 ATM_SKB(skb)->vcc = vcc;
28706 __net_timestamp(skb);
28707 vcc->push(vcc, skb);
28708- atomic_inc(&vcc->stats->rx);
28709+ atomic_inc_unchecked(&vcc->stats->rx);
28710 }
28711
28712 push_rxbufs(card, sb);
28713@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28714 printk
28715 ("nicstar%d: Out of huge buffers.\n",
28716 card->index);
28717- atomic_inc(&vcc->stats->rx_drop);
28718+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28719 recycle_iovec_rx_bufs(card,
28720 (struct iovec *)
28721 iovb->data,
28722@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28723 card->hbpool.count++;
28724 } else
28725 dev_kfree_skb_any(hb);
28726- atomic_inc(&vcc->stats->rx_drop);
28727+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28728 } else {
28729 /* Copy the small buffer to the huge buffer */
28730 sb = (struct sk_buff *)iov->iov_base;
28731@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28732 #endif /* NS_USE_DESTRUCTORS */
28733 __net_timestamp(hb);
28734 vcc->push(vcc, hb);
28735- atomic_inc(&vcc->stats->rx);
28736+ atomic_inc_unchecked(&vcc->stats->rx);
28737 }
28738 }
28739
28740diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28741index 9851093..adb2b1e 100644
28742--- a/drivers/atm/solos-pci.c
28743+++ b/drivers/atm/solos-pci.c
28744@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28745 }
28746 atm_charge(vcc, skb->truesize);
28747 vcc->push(vcc, skb);
28748- atomic_inc(&vcc->stats->rx);
28749+ atomic_inc_unchecked(&vcc->stats->rx);
28750 break;
28751
28752 case PKT_STATUS:
28753@@ -1009,7 +1009,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28754 vcc = SKB_CB(oldskb)->vcc;
28755
28756 if (vcc) {
28757- atomic_inc(&vcc->stats->tx);
28758+ atomic_inc_unchecked(&vcc->stats->tx);
28759 solos_pop(vcc, oldskb);
28760 } else
28761 dev_kfree_skb_irq(oldskb);
28762diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28763index 0215934..ce9f5b1 100644
28764--- a/drivers/atm/suni.c
28765+++ b/drivers/atm/suni.c
28766@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28767
28768
28769 #define ADD_LIMITED(s,v) \
28770- atomic_add((v),&stats->s); \
28771- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28772+ atomic_add_unchecked((v),&stats->s); \
28773+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28774
28775
28776 static void suni_hz(unsigned long from_timer)
28777diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28778index 5120a96..e2572bd 100644
28779--- a/drivers/atm/uPD98402.c
28780+++ b/drivers/atm/uPD98402.c
28781@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28782 struct sonet_stats tmp;
28783 int error = 0;
28784
28785- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28786+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28787 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28788 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28789 if (zero && !error) {
28790@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28791
28792
28793 #define ADD_LIMITED(s,v) \
28794- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28795- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28796- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28797+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28798+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28799+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28800
28801
28802 static void stat_event(struct atm_dev *dev)
28803@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28804 if (reason & uPD98402_INT_PFM) stat_event(dev);
28805 if (reason & uPD98402_INT_PCO) {
28806 (void) GET(PCOCR); /* clear interrupt cause */
28807- atomic_add(GET(HECCT),
28808+ atomic_add_unchecked(GET(HECCT),
28809 &PRIV(dev)->sonet_stats.uncorr_hcs);
28810 }
28811 if ((reason & uPD98402_INT_RFO) &&
28812@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28813 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28814 uPD98402_INT_LOS),PIMR); /* enable them */
28815 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28816- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28817- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28818- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28819+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28820+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28821+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28822 return 0;
28823 }
28824
28825diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28826index abe4e20..83c4727 100644
28827--- a/drivers/atm/zatm.c
28828+++ b/drivers/atm/zatm.c
28829@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28830 }
28831 if (!size) {
28832 dev_kfree_skb_irq(skb);
28833- if (vcc) atomic_inc(&vcc->stats->rx_err);
28834+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28835 continue;
28836 }
28837 if (!atm_charge(vcc,skb->truesize)) {
28838@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28839 skb->len = size;
28840 ATM_SKB(skb)->vcc = vcc;
28841 vcc->push(vcc,skb);
28842- atomic_inc(&vcc->stats->rx);
28843+ atomic_inc_unchecked(&vcc->stats->rx);
28844 }
28845 zout(pos & 0xffff,MTA(mbx));
28846 #if 0 /* probably a stupid idea */
28847@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28848 skb_queue_head(&zatm_vcc->backlog,skb);
28849 break;
28850 }
28851- atomic_inc(&vcc->stats->tx);
28852+ atomic_inc_unchecked(&vcc->stats->tx);
28853 wake_up(&zatm_vcc->tx_wait);
28854 }
28855
28856diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28857index 765c3a2..771ace6 100644
28858--- a/drivers/base/devtmpfs.c
28859+++ b/drivers/base/devtmpfs.c
28860@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28861 if (!thread)
28862 return 0;
28863
28864- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28865+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28866 if (err)
28867 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28868 else
28869diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28870index cbb463b..babe2cf 100644
28871--- a/drivers/base/power/wakeup.c
28872+++ b/drivers/base/power/wakeup.c
28873@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
28874 * They need to be modified together atomically, so it's better to use one
28875 * atomic variable to hold them both.
28876 */
28877-static atomic_t combined_event_count = ATOMIC_INIT(0);
28878+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28879
28880 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28881 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28882
28883 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28884 {
28885- unsigned int comb = atomic_read(&combined_event_count);
28886+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
28887
28888 *cnt = (comb >> IN_PROGRESS_BITS);
28889 *inpr = comb & MAX_IN_PROGRESS;
28890@@ -385,7 +385,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28891 ws->start_prevent_time = ws->last_time;
28892
28893 /* Increment the counter of events in progress. */
28894- cec = atomic_inc_return(&combined_event_count);
28895+ cec = atomic_inc_return_unchecked(&combined_event_count);
28896
28897 trace_wakeup_source_activate(ws->name, cec);
28898 }
28899@@ -511,7 +511,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28900 * Increment the counter of registered wakeup events and decrement the
28901 * couter of wakeup events in progress simultaneously.
28902 */
28903- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
28904+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28905 trace_wakeup_source_deactivate(ws->name, cec);
28906
28907 split_counters(&cnt, &inpr);
28908diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28909index b0f553b..77b928b 100644
28910--- a/drivers/block/cciss.c
28911+++ b/drivers/block/cciss.c
28912@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28913 int err;
28914 u32 cp;
28915
28916+ memset(&arg64, 0, sizeof(arg64));
28917+
28918 err = 0;
28919 err |=
28920 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28921@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28922 while (!list_empty(&h->reqQ)) {
28923 c = list_entry(h->reqQ.next, CommandList_struct, list);
28924 /* can't do anything if fifo is full */
28925- if ((h->access.fifo_full(h))) {
28926+ if ((h->access->fifo_full(h))) {
28927 dev_warn(&h->pdev->dev, "fifo full\n");
28928 break;
28929 }
28930@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28931 h->Qdepth--;
28932
28933 /* Tell the controller execute command */
28934- h->access.submit_command(h, c);
28935+ h->access->submit_command(h, c);
28936
28937 /* Put job onto the completed Q */
28938 addQ(&h->cmpQ, c);
28939@@ -3443,17 +3445,17 @@ startio:
28940
28941 static inline unsigned long get_next_completion(ctlr_info_t *h)
28942 {
28943- return h->access.command_completed(h);
28944+ return h->access->command_completed(h);
28945 }
28946
28947 static inline int interrupt_pending(ctlr_info_t *h)
28948 {
28949- return h->access.intr_pending(h);
28950+ return h->access->intr_pending(h);
28951 }
28952
28953 static inline long interrupt_not_for_us(ctlr_info_t *h)
28954 {
28955- return ((h->access.intr_pending(h) == 0) ||
28956+ return ((h->access->intr_pending(h) == 0) ||
28957 (h->interrupts_enabled == 0));
28958 }
28959
28960@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
28961 u32 a;
28962
28963 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
28964- return h->access.command_completed(h);
28965+ return h->access->command_completed(h);
28966
28967 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
28968 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
28969@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
28970 trans_support & CFGTBL_Trans_use_short_tags);
28971
28972 /* Change the access methods to the performant access methods */
28973- h->access = SA5_performant_access;
28974+ h->access = &SA5_performant_access;
28975 h->transMethod = CFGTBL_Trans_Performant;
28976
28977 return;
28978@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
28979 if (prod_index < 0)
28980 return -ENODEV;
28981 h->product_name = products[prod_index].product_name;
28982- h->access = *(products[prod_index].access);
28983+ h->access = products[prod_index].access;
28984
28985 if (cciss_board_disabled(h)) {
28986 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
28987@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
28988 }
28989
28990 /* make sure the board interrupts are off */
28991- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28992+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28993 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
28994 if (rc)
28995 goto clean2;
28996@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
28997 * fake ones to scoop up any residual completions.
28998 */
28999 spin_lock_irqsave(&h->lock, flags);
29000- h->access.set_intr_mask(h, CCISS_INTR_OFF);
29001+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
29002 spin_unlock_irqrestore(&h->lock, flags);
29003 free_irq(h->intr[h->intr_mode], h);
29004 rc = cciss_request_irq(h, cciss_msix_discard_completions,
29005@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
29006 dev_info(&h->pdev->dev, "Board READY.\n");
29007 dev_info(&h->pdev->dev,
29008 "Waiting for stale completions to drain.\n");
29009- h->access.set_intr_mask(h, CCISS_INTR_ON);
29010+ h->access->set_intr_mask(h, CCISS_INTR_ON);
29011 msleep(10000);
29012- h->access.set_intr_mask(h, CCISS_INTR_OFF);
29013+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
29014
29015 rc = controller_reset_failed(h->cfgtable);
29016 if (rc)
29017@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
29018 cciss_scsi_setup(h);
29019
29020 /* Turn the interrupts on so we can service requests */
29021- h->access.set_intr_mask(h, CCISS_INTR_ON);
29022+ h->access->set_intr_mask(h, CCISS_INTR_ON);
29023
29024 /* Get the firmware version */
29025 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
29026@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
29027 kfree(flush_buf);
29028 if (return_code != IO_OK)
29029 dev_warn(&h->pdev->dev, "Error flushing cache\n");
29030- h->access.set_intr_mask(h, CCISS_INTR_OFF);
29031+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
29032 free_irq(h->intr[h->intr_mode], h);
29033 }
29034
29035diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
29036index 7fda30e..eb5dfe0 100644
29037--- a/drivers/block/cciss.h
29038+++ b/drivers/block/cciss.h
29039@@ -101,7 +101,7 @@ struct ctlr_info
29040 /* information about each logical volume */
29041 drive_info_struct *drv[CISS_MAX_LUN];
29042
29043- struct access_method access;
29044+ struct access_method *access;
29045
29046 /* queue and queue Info */
29047 struct list_head reqQ;
29048diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
29049index 9125bbe..eede5c8 100644
29050--- a/drivers/block/cpqarray.c
29051+++ b/drivers/block/cpqarray.c
29052@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29053 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
29054 goto Enomem4;
29055 }
29056- hba[i]->access.set_intr_mask(hba[i], 0);
29057+ hba[i]->access->set_intr_mask(hba[i], 0);
29058 if (request_irq(hba[i]->intr, do_ida_intr,
29059 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
29060 {
29061@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29062 add_timer(&hba[i]->timer);
29063
29064 /* Enable IRQ now that spinlock and rate limit timer are set up */
29065- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29066+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29067
29068 for(j=0; j<NWD; j++) {
29069 struct gendisk *disk = ida_gendisk[i][j];
29070@@ -694,7 +694,7 @@ DBGINFO(
29071 for(i=0; i<NR_PRODUCTS; i++) {
29072 if (board_id == products[i].board_id) {
29073 c->product_name = products[i].product_name;
29074- c->access = *(products[i].access);
29075+ c->access = products[i].access;
29076 break;
29077 }
29078 }
29079@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
29080 hba[ctlr]->intr = intr;
29081 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
29082 hba[ctlr]->product_name = products[j].product_name;
29083- hba[ctlr]->access = *(products[j].access);
29084+ hba[ctlr]->access = products[j].access;
29085 hba[ctlr]->ctlr = ctlr;
29086 hba[ctlr]->board_id = board_id;
29087 hba[ctlr]->pci_dev = NULL; /* not PCI */
29088@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
29089
29090 while((c = h->reqQ) != NULL) {
29091 /* Can't do anything if we're busy */
29092- if (h->access.fifo_full(h) == 0)
29093+ if (h->access->fifo_full(h) == 0)
29094 return;
29095
29096 /* Get the first entry from the request Q */
29097@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
29098 h->Qdepth--;
29099
29100 /* Tell the controller to do our bidding */
29101- h->access.submit_command(h, c);
29102+ h->access->submit_command(h, c);
29103
29104 /* Get onto the completion Q */
29105 addQ(&h->cmpQ, c);
29106@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29107 unsigned long flags;
29108 __u32 a,a1;
29109
29110- istat = h->access.intr_pending(h);
29111+ istat = h->access->intr_pending(h);
29112 /* Is this interrupt for us? */
29113 if (istat == 0)
29114 return IRQ_NONE;
29115@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29116 */
29117 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
29118 if (istat & FIFO_NOT_EMPTY) {
29119- while((a = h->access.command_completed(h))) {
29120+ while((a = h->access->command_completed(h))) {
29121 a1 = a; a &= ~3;
29122 if ((c = h->cmpQ) == NULL)
29123 {
29124@@ -1449,11 +1449,11 @@ static int sendcmd(
29125 /*
29126 * Disable interrupt
29127 */
29128- info_p->access.set_intr_mask(info_p, 0);
29129+ info_p->access->set_intr_mask(info_p, 0);
29130 /* Make sure there is room in the command FIFO */
29131 /* Actually it should be completely empty at this time. */
29132 for (i = 200000; i > 0; i--) {
29133- temp = info_p->access.fifo_full(info_p);
29134+ temp = info_p->access->fifo_full(info_p);
29135 if (temp != 0) {
29136 break;
29137 }
29138@@ -1466,7 +1466,7 @@ DBG(
29139 /*
29140 * Send the cmd
29141 */
29142- info_p->access.submit_command(info_p, c);
29143+ info_p->access->submit_command(info_p, c);
29144 complete = pollcomplete(ctlr);
29145
29146 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
29147@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
29148 * we check the new geometry. Then turn interrupts back on when
29149 * we're done.
29150 */
29151- host->access.set_intr_mask(host, 0);
29152+ host->access->set_intr_mask(host, 0);
29153 getgeometry(ctlr);
29154- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
29155+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
29156
29157 for(i=0; i<NWD; i++) {
29158 struct gendisk *disk = ida_gendisk[ctlr][i];
29159@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
29160 /* Wait (up to 2 seconds) for a command to complete */
29161
29162 for (i = 200000; i > 0; i--) {
29163- done = hba[ctlr]->access.command_completed(hba[ctlr]);
29164+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
29165 if (done == 0) {
29166 udelay(10); /* a short fixed delay */
29167 } else
29168diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
29169index be73e9d..7fbf140 100644
29170--- a/drivers/block/cpqarray.h
29171+++ b/drivers/block/cpqarray.h
29172@@ -99,7 +99,7 @@ struct ctlr_info {
29173 drv_info_t drv[NWD];
29174 struct proc_dir_entry *proc;
29175
29176- struct access_method access;
29177+ struct access_method *access;
29178
29179 cmdlist_t *reqQ;
29180 cmdlist_t *cmpQ;
29181diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
29182index 02f013a..afeba24 100644
29183--- a/drivers/block/drbd/drbd_int.h
29184+++ b/drivers/block/drbd/drbd_int.h
29185@@ -735,7 +735,7 @@ struct drbd_request;
29186 struct drbd_epoch {
29187 struct list_head list;
29188 unsigned int barrier_nr;
29189- atomic_t epoch_size; /* increased on every request added. */
29190+ atomic_unchecked_t epoch_size; /* increased on every request added. */
29191 atomic_t active; /* increased on every req. added, and dec on every finished. */
29192 unsigned long flags;
29193 };
29194@@ -1110,7 +1110,7 @@ struct drbd_conf {
29195 void *int_dig_in;
29196 void *int_dig_vv;
29197 wait_queue_head_t seq_wait;
29198- atomic_t packet_seq;
29199+ atomic_unchecked_t packet_seq;
29200 unsigned int peer_seq;
29201 spinlock_t peer_seq_lock;
29202 unsigned int minor;
29203@@ -1651,30 +1651,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29204
29205 static inline void drbd_tcp_cork(struct socket *sock)
29206 {
29207- int __user val = 1;
29208+ int val = 1;
29209 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29210- (char __user *)&val, sizeof(val));
29211+ (char __force_user *)&val, sizeof(val));
29212 }
29213
29214 static inline void drbd_tcp_uncork(struct socket *sock)
29215 {
29216- int __user val = 0;
29217+ int val = 0;
29218 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29219- (char __user *)&val, sizeof(val));
29220+ (char __force_user *)&val, sizeof(val));
29221 }
29222
29223 static inline void drbd_tcp_nodelay(struct socket *sock)
29224 {
29225- int __user val = 1;
29226+ int val = 1;
29227 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29228- (char __user *)&val, sizeof(val));
29229+ (char __force_user *)&val, sizeof(val));
29230 }
29231
29232 static inline void drbd_tcp_quickack(struct socket *sock)
29233 {
29234- int __user val = 2;
29235+ int val = 2;
29236 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29237- (char __user *)&val, sizeof(val));
29238+ (char __force_user *)&val, sizeof(val));
29239 }
29240
29241 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
29242diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
29243index 920ede2..cb827ba 100644
29244--- a/drivers/block/drbd/drbd_main.c
29245+++ b/drivers/block/drbd/drbd_main.c
29246@@ -2555,7 +2555,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29247 p.sector = sector;
29248 p.block_id = block_id;
29249 p.blksize = blksize;
29250- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29251+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29252
29253 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
29254 return false;
29255@@ -2853,7 +2853,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29256
29257 p.sector = cpu_to_be64(req->sector);
29258 p.block_id = (unsigned long)req;
29259- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29260+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29261
29262 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29263
29264@@ -3138,7 +3138,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29265 atomic_set(&mdev->unacked_cnt, 0);
29266 atomic_set(&mdev->local_cnt, 0);
29267 atomic_set(&mdev->net_cnt, 0);
29268- atomic_set(&mdev->packet_seq, 0);
29269+ atomic_set_unchecked(&mdev->packet_seq, 0);
29270 atomic_set(&mdev->pp_in_use, 0);
29271 atomic_set(&mdev->pp_in_use_by_net, 0);
29272 atomic_set(&mdev->rs_sect_in, 0);
29273@@ -3220,8 +3220,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29274 mdev->receiver.t_state);
29275
29276 /* no need to lock it, I'm the only thread alive */
29277- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29278- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29279+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29280+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29281 mdev->al_writ_cnt =
29282 mdev->bm_writ_cnt =
29283 mdev->read_cnt =
29284diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
29285index 6d4de6a..7b7ad4b 100644
29286--- a/drivers/block/drbd/drbd_nl.c
29287+++ b/drivers/block/drbd/drbd_nl.c
29288@@ -2387,7 +2387,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29289 module_put(THIS_MODULE);
29290 }
29291
29292-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29293+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29294
29295 static unsigned short *
29296 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
29297@@ -2458,7 +2458,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29298 cn_reply->id.idx = CN_IDX_DRBD;
29299 cn_reply->id.val = CN_VAL_DRBD;
29300
29301- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29302+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29303 cn_reply->ack = 0; /* not used here. */
29304 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29305 (int)((char *)tl - (char *)reply->tag_list);
29306@@ -2490,7 +2490,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29307 cn_reply->id.idx = CN_IDX_DRBD;
29308 cn_reply->id.val = CN_VAL_DRBD;
29309
29310- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29311+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29312 cn_reply->ack = 0; /* not used here. */
29313 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29314 (int)((char *)tl - (char *)reply->tag_list);
29315@@ -2568,7 +2568,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29316 cn_reply->id.idx = CN_IDX_DRBD;
29317 cn_reply->id.val = CN_VAL_DRBD;
29318
29319- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29320+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29321 cn_reply->ack = 0; // not used here.
29322 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29323 (int)((char*)tl - (char*)reply->tag_list);
29324@@ -2607,7 +2607,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29325 cn_reply->id.idx = CN_IDX_DRBD;
29326 cn_reply->id.val = CN_VAL_DRBD;
29327
29328- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29329+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29330 cn_reply->ack = 0; /* not used here. */
29331 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29332 (int)((char *)tl - (char *)reply->tag_list);
29333diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29334index ea4836e..272d72a 100644
29335--- a/drivers/block/drbd/drbd_receiver.c
29336+++ b/drivers/block/drbd/drbd_receiver.c
29337@@ -893,7 +893,7 @@ retry:
29338 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29339 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29340
29341- atomic_set(&mdev->packet_seq, 0);
29342+ atomic_set_unchecked(&mdev->packet_seq, 0);
29343 mdev->peer_seq = 0;
29344
29345 if (drbd_send_protocol(mdev) == -1)
29346@@ -994,7 +994,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29347 do {
29348 next_epoch = NULL;
29349
29350- epoch_size = atomic_read(&epoch->epoch_size);
29351+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29352
29353 switch (ev & ~EV_CLEANUP) {
29354 case EV_PUT:
29355@@ -1030,7 +1030,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29356 rv = FE_DESTROYED;
29357 } else {
29358 epoch->flags = 0;
29359- atomic_set(&epoch->epoch_size, 0);
29360+ atomic_set_unchecked(&epoch->epoch_size, 0);
29361 /* atomic_set(&epoch->active, 0); is already zero */
29362 if (rv == FE_STILL_LIVE)
29363 rv = FE_RECYCLED;
29364@@ -1205,14 +1205,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29365 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29366 drbd_flush(mdev);
29367
29368- if (atomic_read(&mdev->current_epoch->epoch_size)) {
29369+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29370 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29371 if (epoch)
29372 break;
29373 }
29374
29375 epoch = mdev->current_epoch;
29376- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29377+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29378
29379 D_ASSERT(atomic_read(&epoch->active) == 0);
29380 D_ASSERT(epoch->flags == 0);
29381@@ -1224,11 +1224,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29382 }
29383
29384 epoch->flags = 0;
29385- atomic_set(&epoch->epoch_size, 0);
29386+ atomic_set_unchecked(&epoch->epoch_size, 0);
29387 atomic_set(&epoch->active, 0);
29388
29389 spin_lock(&mdev->epoch_lock);
29390- if (atomic_read(&mdev->current_epoch->epoch_size)) {
29391+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29392 list_add(&epoch->list, &mdev->current_epoch->list);
29393 mdev->current_epoch = epoch;
29394 mdev->epochs++;
29395@@ -1695,7 +1695,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29396 spin_unlock(&mdev->peer_seq_lock);
29397
29398 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29399- atomic_inc(&mdev->current_epoch->epoch_size);
29400+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29401 return drbd_drain_block(mdev, data_size);
29402 }
29403
29404@@ -1721,7 +1721,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29405
29406 spin_lock(&mdev->epoch_lock);
29407 e->epoch = mdev->current_epoch;
29408- atomic_inc(&e->epoch->epoch_size);
29409+ atomic_inc_unchecked(&e->epoch->epoch_size);
29410 atomic_inc(&e->epoch->active);
29411 spin_unlock(&mdev->epoch_lock);
29412
29413@@ -3936,7 +3936,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29414 D_ASSERT(list_empty(&mdev->done_ee));
29415
29416 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29417- atomic_set(&mdev->current_epoch->epoch_size, 0);
29418+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29419 D_ASSERT(list_empty(&mdev->current_epoch->list));
29420 }
29421
29422diff --git a/drivers/block/loop.c b/drivers/block/loop.c
29423index 3bba655..6151b66 100644
29424--- a/drivers/block/loop.c
29425+++ b/drivers/block/loop.c
29426@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29427 mm_segment_t old_fs = get_fs();
29428
29429 set_fs(get_ds());
29430- bw = file->f_op->write(file, buf, len, &pos);
29431+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29432 set_fs(old_fs);
29433 if (likely(bw == len))
29434 return 0;
29435diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
29436index ea6f632..eafb192 100644
29437--- a/drivers/char/Kconfig
29438+++ b/drivers/char/Kconfig
29439@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29440
29441 config DEVKMEM
29442 bool "/dev/kmem virtual device support"
29443- default y
29444+ default n
29445+ depends on !GRKERNSEC_KMEM
29446 help
29447 Say Y here if you want to support the /dev/kmem device. The
29448 /dev/kmem device is rarely used, but can be used for certain
29449@@ -581,6 +582,7 @@ config DEVPORT
29450 bool
29451 depends on !M68K
29452 depends on ISA || PCI
29453+ depends on !GRKERNSEC_KMEM
29454 default y
29455
29456 source "drivers/s390/char/Kconfig"
29457diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29458index 2e04433..22afc64 100644
29459--- a/drivers/char/agp/frontend.c
29460+++ b/drivers/char/agp/frontend.c
29461@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29462 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29463 return -EFAULT;
29464
29465- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29466+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29467 return -EFAULT;
29468
29469 client = agp_find_client_by_pid(reserve.pid);
29470diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29471index 21cb980..f15107c 100644
29472--- a/drivers/char/genrtc.c
29473+++ b/drivers/char/genrtc.c
29474@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
29475 switch (cmd) {
29476
29477 case RTC_PLL_GET:
29478+ memset(&pll, 0, sizeof(pll));
29479 if (get_rtc_pll(&pll))
29480 return -EINVAL;
29481 else
29482diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29483index dfd7876..c0b0885 100644
29484--- a/drivers/char/hpet.c
29485+++ b/drivers/char/hpet.c
29486@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29487 }
29488
29489 static int
29490-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29491+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29492 struct hpet_info *info)
29493 {
29494 struct hpet_timer __iomem *timer;
29495diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29496index 2c29942..604c5ba 100644
29497--- a/drivers/char/ipmi/ipmi_msghandler.c
29498+++ b/drivers/char/ipmi/ipmi_msghandler.c
29499@@ -420,7 +420,7 @@ struct ipmi_smi {
29500 struct proc_dir_entry *proc_dir;
29501 char proc_dir_name[10];
29502
29503- atomic_t stats[IPMI_NUM_STATS];
29504+ atomic_unchecked_t stats[IPMI_NUM_STATS];
29505
29506 /*
29507 * run_to_completion duplicate of smb_info, smi_info
29508@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29509
29510
29511 #define ipmi_inc_stat(intf, stat) \
29512- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29513+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29514 #define ipmi_get_stat(intf, stat) \
29515- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29516+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29517
29518 static int is_lan_addr(struct ipmi_addr *addr)
29519 {
29520@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29521 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29522 init_waitqueue_head(&intf->waitq);
29523 for (i = 0; i < IPMI_NUM_STATS; i++)
29524- atomic_set(&intf->stats[i], 0);
29525+ atomic_set_unchecked(&intf->stats[i], 0);
29526
29527 intf->proc_dir = NULL;
29528
29529diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29530index 1e638ff..a869ef5 100644
29531--- a/drivers/char/ipmi/ipmi_si_intf.c
29532+++ b/drivers/char/ipmi/ipmi_si_intf.c
29533@@ -275,7 +275,7 @@ struct smi_info {
29534 unsigned char slave_addr;
29535
29536 /* Counters and things for the proc filesystem. */
29537- atomic_t stats[SI_NUM_STATS];
29538+ atomic_unchecked_t stats[SI_NUM_STATS];
29539
29540 struct task_struct *thread;
29541
29542@@ -284,9 +284,9 @@ struct smi_info {
29543 };
29544
29545 #define smi_inc_stat(smi, stat) \
29546- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29547+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29548 #define smi_get_stat(smi, stat) \
29549- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29550+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29551
29552 #define SI_MAX_PARMS 4
29553
29554@@ -3209,7 +3209,7 @@ static int try_smi_init(struct smi_info *new_smi)
29555 atomic_set(&new_smi->req_events, 0);
29556 new_smi->run_to_completion = 0;
29557 for (i = 0; i < SI_NUM_STATS; i++)
29558- atomic_set(&new_smi->stats[i], 0);
29559+ atomic_set_unchecked(&new_smi->stats[i], 0);
29560
29561 new_smi->interrupt_disabled = 1;
29562 atomic_set(&new_smi->stop_operation, 0);
29563diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29564index 47ff7e4..0c7d340 100644
29565--- a/drivers/char/mbcs.c
29566+++ b/drivers/char/mbcs.c
29567@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
29568 return 0;
29569 }
29570
29571-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29572+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29573 {
29574 .part_num = MBCS_PART_NUM,
29575 .mfg_num = MBCS_MFG_NUM,
29576diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29577index 67c3371..ba8429d 100644
29578--- a/drivers/char/mem.c
29579+++ b/drivers/char/mem.c
29580@@ -18,6 +18,7 @@
29581 #include <linux/raw.h>
29582 #include <linux/tty.h>
29583 #include <linux/capability.h>
29584+#include <linux/security.h>
29585 #include <linux/ptrace.h>
29586 #include <linux/device.h>
29587 #include <linux/highmem.h>
29588@@ -35,6 +36,10 @@
29589 # include <linux/efi.h>
29590 #endif
29591
29592+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29593+extern const struct file_operations grsec_fops;
29594+#endif
29595+
29596 static inline unsigned long size_inside_page(unsigned long start,
29597 unsigned long size)
29598 {
29599@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29600
29601 while (cursor < to) {
29602 if (!devmem_is_allowed(pfn)) {
29603+#ifdef CONFIG_GRKERNSEC_KMEM
29604+ gr_handle_mem_readwrite(from, to);
29605+#else
29606 printk(KERN_INFO
29607 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29608 current->comm, from, to);
29609+#endif
29610 return 0;
29611 }
29612 cursor += PAGE_SIZE;
29613@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29614 }
29615 return 1;
29616 }
29617+#elif defined(CONFIG_GRKERNSEC_KMEM)
29618+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29619+{
29620+ return 0;
29621+}
29622 #else
29623 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29624 {
29625@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29626
29627 while (count > 0) {
29628 unsigned long remaining;
29629+ char *temp;
29630
29631 sz = size_inside_page(p, count);
29632
29633@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29634 if (!ptr)
29635 return -EFAULT;
29636
29637- remaining = copy_to_user(buf, ptr, sz);
29638+#ifdef CONFIG_PAX_USERCOPY
29639+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
29640+ if (!temp) {
29641+ unxlate_dev_mem_ptr(p, ptr);
29642+ return -ENOMEM;
29643+ }
29644+ memcpy(temp, ptr, sz);
29645+#else
29646+ temp = ptr;
29647+#endif
29648+
29649+ remaining = copy_to_user(buf, temp, sz);
29650+
29651+#ifdef CONFIG_PAX_USERCOPY
29652+ kfree(temp);
29653+#endif
29654+
29655 unxlate_dev_mem_ptr(p, ptr);
29656 if (remaining)
29657 return -EFAULT;
29658@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29659 size_t count, loff_t *ppos)
29660 {
29661 unsigned long p = *ppos;
29662- ssize_t low_count, read, sz;
29663+ ssize_t low_count, read, sz, err = 0;
29664 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29665- int err = 0;
29666
29667 read = 0;
29668 if (p < (unsigned long) high_memory) {
29669@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29670 }
29671 #endif
29672 while (low_count > 0) {
29673+ char *temp;
29674+
29675 sz = size_inside_page(p, low_count);
29676
29677 /*
29678@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29679 */
29680 kbuf = xlate_dev_kmem_ptr((char *)p);
29681
29682- if (copy_to_user(buf, kbuf, sz))
29683+#ifdef CONFIG_PAX_USERCOPY
29684+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
29685+ if (!temp)
29686+ return -ENOMEM;
29687+ memcpy(temp, kbuf, sz);
29688+#else
29689+ temp = kbuf;
29690+#endif
29691+
29692+ err = copy_to_user(buf, temp, sz);
29693+
29694+#ifdef CONFIG_PAX_USERCOPY
29695+ kfree(temp);
29696+#endif
29697+
29698+ if (err)
29699 return -EFAULT;
29700 buf += sz;
29701 p += sz;
29702@@ -831,6 +878,9 @@ static const struct memdev {
29703 #ifdef CONFIG_CRASH_DUMP
29704 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29705 #endif
29706+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29707+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29708+#endif
29709 };
29710
29711 static int memory_open(struct inode *inode, struct file *filp)
29712diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29713index 9df78e2..01ba9ae 100644
29714--- a/drivers/char/nvram.c
29715+++ b/drivers/char/nvram.c
29716@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29717
29718 spin_unlock_irq(&rtc_lock);
29719
29720- if (copy_to_user(buf, contents, tmp - contents))
29721+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29722 return -EFAULT;
29723
29724 *ppos = i;
29725diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
29726index 0a484b4..f48ccd1 100644
29727--- a/drivers/char/pcmcia/synclink_cs.c
29728+++ b/drivers/char/pcmcia/synclink_cs.c
29729@@ -2340,9 +2340,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
29730
29731 if (debug_level >= DEBUG_LEVEL_INFO)
29732 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
29733- __FILE__,__LINE__, info->device_name, port->count);
29734+ __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
29735
29736- WARN_ON(!port->count);
29737+ WARN_ON(!atomic_read(&port->count));
29738
29739 if (tty_port_close_start(port, tty, filp) == 0)
29740 goto cleanup;
29741@@ -2360,7 +2360,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
29742 cleanup:
29743 if (debug_level >= DEBUG_LEVEL_INFO)
29744 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
29745- tty->driver->name, port->count);
29746+ tty->driver->name, atomic_read(&port->count));
29747 }
29748
29749 /* Wait until the transmitter is empty.
29750@@ -2502,7 +2502,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
29751
29752 if (debug_level >= DEBUG_LEVEL_INFO)
29753 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
29754- __FILE__,__LINE__,tty->driver->name, port->count);
29755+ __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
29756
29757 /* If port is closing, signal caller to try again */
29758 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
29759@@ -2522,11 +2522,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
29760 goto cleanup;
29761 }
29762 spin_lock(&port->lock);
29763- port->count++;
29764+ atomic_inc(&port->count);
29765 spin_unlock(&port->lock);
29766 spin_unlock_irqrestore(&info->netlock, flags);
29767
29768- if (port->count == 1) {
29769+ if (atomic_read(&port->count) == 1) {
29770 /* 1st open on this device, init hardware */
29771 retval = startup(info, tty);
29772 if (retval < 0)
29773@@ -3891,7 +3891,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
29774 unsigned short new_crctype;
29775
29776 /* return error if TTY interface open */
29777- if (info->port.count)
29778+ if (atomic_read(&info->port.count))
29779 return -EBUSY;
29780
29781 switch (encoding)
29782@@ -3994,7 +3994,7 @@ static int hdlcdev_open(struct net_device *dev)
29783
29784 /* arbitrate between network and tty opens */
29785 spin_lock_irqsave(&info->netlock, flags);
29786- if (info->port.count != 0 || info->netcount != 0) {
29787+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
29788 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
29789 spin_unlock_irqrestore(&info->netlock, flags);
29790 return -EBUSY;
29791@@ -4083,7 +4083,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
29792 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
29793
29794 /* return error if TTY interface open */
29795- if (info->port.count)
29796+ if (atomic_read(&info->port.count))
29797 return -EBUSY;
29798
29799 if (cmd != SIOCWANDEV)
29800diff --git a/drivers/char/random.c b/drivers/char/random.c
29801index d98b2a6..f0ceb97 100644
29802--- a/drivers/char/random.c
29803+++ b/drivers/char/random.c
29804@@ -272,8 +272,13 @@
29805 /*
29806 * Configuration information
29807 */
29808+#ifdef CONFIG_GRKERNSEC_RANDNET
29809+#define INPUT_POOL_WORDS 512
29810+#define OUTPUT_POOL_WORDS 128
29811+#else
29812 #define INPUT_POOL_WORDS 128
29813 #define OUTPUT_POOL_WORDS 32
29814+#endif
29815 #define SEC_XFER_SIZE 512
29816 #define EXTRACT_SIZE 10
29817
29818@@ -313,10 +318,17 @@ static struct poolinfo {
29819 int poolwords;
29820 int tap1, tap2, tap3, tap4, tap5;
29821 } poolinfo_table[] = {
29822+#ifdef CONFIG_GRKERNSEC_RANDNET
29823+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29824+ { 512, 411, 308, 208, 104, 1 },
29825+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29826+ { 128, 103, 76, 51, 25, 1 },
29827+#else
29828 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29829 { 128, 103, 76, 51, 25, 1 },
29830 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29831 { 32, 26, 20, 14, 7, 1 },
29832+#endif
29833 #if 0
29834 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29835 { 2048, 1638, 1231, 819, 411, 1 },
29836@@ -527,8 +539,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
29837 input_rotate += i ? 7 : 14;
29838 }
29839
29840- ACCESS_ONCE(r->input_rotate) = input_rotate;
29841- ACCESS_ONCE(r->add_ptr) = i;
29842+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
29843+ ACCESS_ONCE_RW(r->add_ptr) = i;
29844 smp_wmb();
29845
29846 if (out)
29847@@ -799,6 +811,17 @@ void add_disk_randomness(struct gendisk *disk)
29848 }
29849 #endif
29850
29851+#ifdef CONFIG_PAX_LATENT_ENTROPY
29852+u64 latent_entropy;
29853+
29854+__init void transfer_latent_entropy(void)
29855+{
29856+ mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy), NULL);
29857+ mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy), NULL);
29858+// printk(KERN_INFO "PAX: transferring latent entropy: %16llx\n", latent_entropy);
29859+}
29860+#endif
29861+
29862 /*********************************************************************
29863 *
29864 * Entropy extraction routines
29865@@ -1008,7 +1031,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29866
29867 extract_buf(r, tmp);
29868 i = min_t(int, nbytes, EXTRACT_SIZE);
29869- if (copy_to_user(buf, tmp, i)) {
29870+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29871 ret = -EFAULT;
29872 break;
29873 }
29874@@ -1342,7 +1365,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29875 #include <linux/sysctl.h>
29876
29877 static int min_read_thresh = 8, min_write_thresh;
29878-static int max_read_thresh = INPUT_POOL_WORDS * 32;
29879+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29880 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29881 static char sysctl_bootid[16];
29882
29883diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29884index 45713f0..8286d21 100644
29885--- a/drivers/char/sonypi.c
29886+++ b/drivers/char/sonypi.c
29887@@ -54,6 +54,7 @@
29888
29889 #include <asm/uaccess.h>
29890 #include <asm/io.h>
29891+#include <asm/local.h>
29892
29893 #include <linux/sonypi.h>
29894
29895@@ -490,7 +491,7 @@ static struct sonypi_device {
29896 spinlock_t fifo_lock;
29897 wait_queue_head_t fifo_proc_list;
29898 struct fasync_struct *fifo_async;
29899- int open_count;
29900+ local_t open_count;
29901 int model;
29902 struct input_dev *input_jog_dev;
29903 struct input_dev *input_key_dev;
29904@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29905 static int sonypi_misc_release(struct inode *inode, struct file *file)
29906 {
29907 mutex_lock(&sonypi_device.lock);
29908- sonypi_device.open_count--;
29909+ local_dec(&sonypi_device.open_count);
29910 mutex_unlock(&sonypi_device.lock);
29911 return 0;
29912 }
29913@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29914 {
29915 mutex_lock(&sonypi_device.lock);
29916 /* Flush input queue on first open */
29917- if (!sonypi_device.open_count)
29918+ if (!local_read(&sonypi_device.open_count))
29919 kfifo_reset(&sonypi_device.fifo);
29920- sonypi_device.open_count++;
29921+ local_inc(&sonypi_device.open_count);
29922 mutex_unlock(&sonypi_device.lock);
29923
29924 return 0;
29925diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29926index 08427ab..1ab10b7 100644
29927--- a/drivers/char/tpm/tpm.c
29928+++ b/drivers/char/tpm/tpm.c
29929@@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29930 chip->vendor.req_complete_val)
29931 goto out_recv;
29932
29933- if ((status == chip->vendor.req_canceled)) {
29934+ if (status == chip->vendor.req_canceled) {
29935 dev_err(chip->dev, "Operation Canceled\n");
29936 rc = -ECANCELED;
29937 goto out;
29938diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29939index 0636520..169c1d0 100644
29940--- a/drivers/char/tpm/tpm_bios.c
29941+++ b/drivers/char/tpm/tpm_bios.c
29942@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29943 event = addr;
29944
29945 if ((event->event_type == 0 && event->event_size == 0) ||
29946- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29947+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29948 return NULL;
29949
29950 return addr;
29951@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29952 return NULL;
29953
29954 if ((event->event_type == 0 && event->event_size == 0) ||
29955- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29956+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29957 return NULL;
29958
29959 (*pos)++;
29960@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
29961 int i;
29962
29963 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29964- seq_putc(m, data[i]);
29965+ if (!seq_putc(m, data[i]))
29966+ return -EFAULT;
29967
29968 return 0;
29969 }
29970@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
29971 log->bios_event_log_end = log->bios_event_log + len;
29972
29973 virt = acpi_os_map_memory(start, len);
29974+ if (!virt) {
29975+ kfree(log->bios_event_log);
29976+ log->bios_event_log = NULL;
29977+ return -EFAULT;
29978+ }
29979
29980- memcpy(log->bios_event_log, virt, len);
29981+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
29982
29983 acpi_os_unmap_memory(virt, len);
29984 return 0;
29985diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
29986index cdf2f54..e55c197 100644
29987--- a/drivers/char/virtio_console.c
29988+++ b/drivers/char/virtio_console.c
29989@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
29990 if (to_user) {
29991 ssize_t ret;
29992
29993- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
29994+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
29995 if (ret)
29996 return -EFAULT;
29997 } else {
29998@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
29999 if (!port_has_data(port) && !port->host_connected)
30000 return 0;
30001
30002- return fill_readbuf(port, ubuf, count, true);
30003+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
30004 }
30005
30006 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
30007diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
30008index 97f5064..202b6e6 100644
30009--- a/drivers/edac/edac_pci_sysfs.c
30010+++ b/drivers/edac/edac_pci_sysfs.c
30011@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
30012 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
30013 static int edac_pci_poll_msec = 1000; /* one second workq period */
30014
30015-static atomic_t pci_parity_count = ATOMIC_INIT(0);
30016-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
30017+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
30018+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
30019
30020 static struct kobject *edac_pci_top_main_kobj;
30021 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
30022@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30023 edac_printk(KERN_CRIT, EDAC_PCI,
30024 "Signaled System Error on %s\n",
30025 pci_name(dev));
30026- atomic_inc(&pci_nonparity_count);
30027+ atomic_inc_unchecked(&pci_nonparity_count);
30028 }
30029
30030 if (status & (PCI_STATUS_PARITY)) {
30031@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30032 "Master Data Parity Error on %s\n",
30033 pci_name(dev));
30034
30035- atomic_inc(&pci_parity_count);
30036+ atomic_inc_unchecked(&pci_parity_count);
30037 }
30038
30039 if (status & (PCI_STATUS_DETECTED_PARITY)) {
30040@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30041 "Detected Parity Error on %s\n",
30042 pci_name(dev));
30043
30044- atomic_inc(&pci_parity_count);
30045+ atomic_inc_unchecked(&pci_parity_count);
30046 }
30047 }
30048
30049@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30050 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
30051 "Signaled System Error on %s\n",
30052 pci_name(dev));
30053- atomic_inc(&pci_nonparity_count);
30054+ atomic_inc_unchecked(&pci_nonparity_count);
30055 }
30056
30057 if (status & (PCI_STATUS_PARITY)) {
30058@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30059 "Master Data Parity Error on "
30060 "%s\n", pci_name(dev));
30061
30062- atomic_inc(&pci_parity_count);
30063+ atomic_inc_unchecked(&pci_parity_count);
30064 }
30065
30066 if (status & (PCI_STATUS_DETECTED_PARITY)) {
30067@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
30068 "Detected Parity Error on %s\n",
30069 pci_name(dev));
30070
30071- atomic_inc(&pci_parity_count);
30072+ atomic_inc_unchecked(&pci_parity_count);
30073 }
30074 }
30075 }
30076@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
30077 if (!check_pci_errors)
30078 return;
30079
30080- before_count = atomic_read(&pci_parity_count);
30081+ before_count = atomic_read_unchecked(&pci_parity_count);
30082
30083 /* scan all PCI devices looking for a Parity Error on devices and
30084 * bridges.
30085@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
30086 /* Only if operator has selected panic on PCI Error */
30087 if (edac_pci_get_panic_on_pe()) {
30088 /* If the count is different 'after' from 'before' */
30089- if (before_count != atomic_read(&pci_parity_count))
30090+ if (before_count != atomic_read_unchecked(&pci_parity_count))
30091 panic("EDAC: PCI Parity Error");
30092 }
30093 }
30094diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
30095index 8c87a5e..a19cbd7 100644
30096--- a/drivers/edac/mce_amd.h
30097+++ b/drivers/edac/mce_amd.h
30098@@ -80,7 +80,7 @@ extern const char * const ii_msgs[];
30099 struct amd_decoder_ops {
30100 bool (*dc_mce)(u16, u8);
30101 bool (*ic_mce)(u16, u8);
30102-};
30103+} __no_const;
30104
30105 void amd_report_gart_errors(bool);
30106 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
30107diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
30108index 57ea7f4..789e3c3 100644
30109--- a/drivers/firewire/core-card.c
30110+++ b/drivers/firewire/core-card.c
30111@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
30112
30113 void fw_core_remove_card(struct fw_card *card)
30114 {
30115- struct fw_card_driver dummy_driver = dummy_driver_template;
30116+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
30117
30118 card->driver->update_phy_reg(card, 4,
30119 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
30120diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
30121index 2783f69..9f4b0cc 100644
30122--- a/drivers/firewire/core-cdev.c
30123+++ b/drivers/firewire/core-cdev.c
30124@@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
30125 int ret;
30126
30127 if ((request->channels == 0 && request->bandwidth == 0) ||
30128- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
30129- request->bandwidth < 0)
30130+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
30131 return -EINVAL;
30132
30133 r = kmalloc(sizeof(*r), GFP_KERNEL);
30134diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
30135index 780708d..ad60a66 100644
30136--- a/drivers/firewire/core-transaction.c
30137+++ b/drivers/firewire/core-transaction.c
30138@@ -37,6 +37,7 @@
30139 #include <linux/timer.h>
30140 #include <linux/types.h>
30141 #include <linux/workqueue.h>
30142+#include <linux/sched.h>
30143
30144 #include <asm/byteorder.h>
30145
30146diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
30147index 515a42c..5ecf3ba 100644
30148--- a/drivers/firewire/core.h
30149+++ b/drivers/firewire/core.h
30150@@ -111,6 +111,7 @@ struct fw_card_driver {
30151
30152 int (*stop_iso)(struct fw_iso_context *ctx);
30153 };
30154+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
30155
30156 void fw_card_initialize(struct fw_card *card,
30157 const struct fw_card_driver *driver, struct device *device);
30158diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
30159index b298158..7ed8432 100644
30160--- a/drivers/firmware/dmi_scan.c
30161+++ b/drivers/firmware/dmi_scan.c
30162@@ -452,11 +452,6 @@ void __init dmi_scan_machine(void)
30163 }
30164 }
30165 else {
30166- /*
30167- * no iounmap() for that ioremap(); it would be a no-op, but
30168- * it's so early in setup that sucker gets confused into doing
30169- * what it shouldn't if we actually call it.
30170- */
30171 p = dmi_ioremap(0xF0000, 0x10000);
30172 if (p == NULL)
30173 goto error;
30174@@ -726,7 +721,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
30175 if (buf == NULL)
30176 return -1;
30177
30178- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
30179+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
30180
30181 iounmap(buf);
30182 return 0;
30183diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
30184index 82d5c20..44a7177 100644
30185--- a/drivers/gpio/gpio-vr41xx.c
30186+++ b/drivers/gpio/gpio-vr41xx.c
30187@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
30188 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
30189 maskl, pendl, maskh, pendh);
30190
30191- atomic_inc(&irq_err_count);
30192+ atomic_inc_unchecked(&irq_err_count);
30193
30194 return -EINVAL;
30195 }
30196diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
30197index 3252e70..b5314ace 100644
30198--- a/drivers/gpu/drm/drm_crtc_helper.c
30199+++ b/drivers/gpu/drm/drm_crtc_helper.c
30200@@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
30201 struct drm_crtc *tmp;
30202 int crtc_mask = 1;
30203
30204- WARN(!crtc, "checking null crtc?\n");
30205+ BUG_ON(!crtc);
30206
30207 dev = crtc->dev;
30208
30209diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
30210index 8a9d079..606cdd5 100644
30211--- a/drivers/gpu/drm/drm_drv.c
30212+++ b/drivers/gpu/drm/drm_drv.c
30213@@ -318,7 +318,7 @@ module_exit(drm_core_exit);
30214 /**
30215 * Copy and IOCTL return string to user space
30216 */
30217-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
30218+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
30219 {
30220 int len;
30221
30222@@ -401,7 +401,7 @@ long drm_ioctl(struct file *filp,
30223 return -ENODEV;
30224
30225 atomic_inc(&dev->ioctl_count);
30226- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
30227+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
30228 ++file_priv->ioctl_count;
30229
30230 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
30231diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
30232index 123de28..43a0897 100644
30233--- a/drivers/gpu/drm/drm_fops.c
30234+++ b/drivers/gpu/drm/drm_fops.c
30235@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
30236 }
30237
30238 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
30239- atomic_set(&dev->counts[i], 0);
30240+ atomic_set_unchecked(&dev->counts[i], 0);
30241
30242 dev->sigdata.lock = NULL;
30243
30244@@ -138,8 +138,8 @@ int drm_open(struct inode *inode, struct file *filp)
30245
30246 retcode = drm_open_helper(inode, filp, dev);
30247 if (!retcode) {
30248- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
30249- if (!dev->open_count++)
30250+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
30251+ if (local_inc_return(&dev->open_count) == 1)
30252 retcode = drm_setup(dev);
30253 }
30254 if (!retcode) {
30255@@ -482,7 +482,7 @@ int drm_release(struct inode *inode, struct file *filp)
30256
30257 mutex_lock(&drm_global_mutex);
30258
30259- DRM_DEBUG("open_count = %d\n", dev->open_count);
30260+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
30261
30262 if (dev->driver->preclose)
30263 dev->driver->preclose(dev, file_priv);
30264@@ -491,10 +491,10 @@ int drm_release(struct inode *inode, struct file *filp)
30265 * Begin inline drm_release
30266 */
30267
30268- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30269+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
30270 task_pid_nr(current),
30271 (long)old_encode_dev(file_priv->minor->device),
30272- dev->open_count);
30273+ local_read(&dev->open_count));
30274
30275 /* Release any auth tokens that might point to this file_priv,
30276 (do that under the drm_global_mutex) */
30277@@ -584,8 +584,8 @@ int drm_release(struct inode *inode, struct file *filp)
30278 * End inline drm_release
30279 */
30280
30281- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
30282- if (!--dev->open_count) {
30283+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
30284+ if (local_dec_and_test(&dev->open_count)) {
30285 if (atomic_read(&dev->ioctl_count)) {
30286 DRM_ERROR("Device busy: %d\n",
30287 atomic_read(&dev->ioctl_count));
30288diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30289index c87dc96..326055d 100644
30290--- a/drivers/gpu/drm/drm_global.c
30291+++ b/drivers/gpu/drm/drm_global.c
30292@@ -36,7 +36,7 @@
30293 struct drm_global_item {
30294 struct mutex mutex;
30295 void *object;
30296- int refcount;
30297+ atomic_t refcount;
30298 };
30299
30300 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30301@@ -49,7 +49,7 @@ void drm_global_init(void)
30302 struct drm_global_item *item = &glob[i];
30303 mutex_init(&item->mutex);
30304 item->object = NULL;
30305- item->refcount = 0;
30306+ atomic_set(&item->refcount, 0);
30307 }
30308 }
30309
30310@@ -59,7 +59,7 @@ void drm_global_release(void)
30311 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30312 struct drm_global_item *item = &glob[i];
30313 BUG_ON(item->object != NULL);
30314- BUG_ON(item->refcount != 0);
30315+ BUG_ON(atomic_read(&item->refcount) != 0);
30316 }
30317 }
30318
30319@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30320 void *object;
30321
30322 mutex_lock(&item->mutex);
30323- if (item->refcount == 0) {
30324+ if (atomic_read(&item->refcount) == 0) {
30325 item->object = kzalloc(ref->size, GFP_KERNEL);
30326 if (unlikely(item->object == NULL)) {
30327 ret = -ENOMEM;
30328@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30329 goto out_err;
30330
30331 }
30332- ++item->refcount;
30333+ atomic_inc(&item->refcount);
30334 ref->object = item->object;
30335 object = item->object;
30336 mutex_unlock(&item->mutex);
30337@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30338 struct drm_global_item *item = &glob[ref->global_type];
30339
30340 mutex_lock(&item->mutex);
30341- BUG_ON(item->refcount == 0);
30342+ BUG_ON(atomic_read(&item->refcount) == 0);
30343 BUG_ON(ref->object != item->object);
30344- if (--item->refcount == 0) {
30345+ if (atomic_dec_and_test(&item->refcount)) {
30346 ref->release(ref);
30347 item->object = NULL;
30348 }
30349diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30350index ab1162d..42587b2 100644
30351--- a/drivers/gpu/drm/drm_info.c
30352+++ b/drivers/gpu/drm/drm_info.c
30353@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30354 struct drm_local_map *map;
30355 struct drm_map_list *r_list;
30356
30357- /* Hardcoded from _DRM_FRAME_BUFFER,
30358- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30359- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30360- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30361+ static const char * const types[] = {
30362+ [_DRM_FRAME_BUFFER] = "FB",
30363+ [_DRM_REGISTERS] = "REG",
30364+ [_DRM_SHM] = "SHM",
30365+ [_DRM_AGP] = "AGP",
30366+ [_DRM_SCATTER_GATHER] = "SG",
30367+ [_DRM_CONSISTENT] = "PCI",
30368+ [_DRM_GEM] = "GEM" };
30369 const char *type;
30370 int i;
30371
30372@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30373 map = r_list->map;
30374 if (!map)
30375 continue;
30376- if (map->type < 0 || map->type > 5)
30377+ if (map->type >= ARRAY_SIZE(types))
30378 type = "??";
30379 else
30380 type = types[map->type];
30381@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30382 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30383 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30384 vma->vm_flags & VM_IO ? 'i' : '-',
30385+#ifdef CONFIG_GRKERNSEC_HIDESYM
30386+ 0);
30387+#else
30388 vma->vm_pgoff);
30389+#endif
30390
30391 #if defined(__i386__)
30392 pgprot = pgprot_val(vma->vm_page_prot);
30393diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30394index 637fcc3..e890b33 100644
30395--- a/drivers/gpu/drm/drm_ioc32.c
30396+++ b/drivers/gpu/drm/drm_ioc32.c
30397@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30398 request = compat_alloc_user_space(nbytes);
30399 if (!access_ok(VERIFY_WRITE, request, nbytes))
30400 return -EFAULT;
30401- list = (struct drm_buf_desc *) (request + 1);
30402+ list = (struct drm_buf_desc __user *) (request + 1);
30403
30404 if (__put_user(count, &request->count)
30405 || __put_user(list, &request->list))
30406@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30407 request = compat_alloc_user_space(nbytes);
30408 if (!access_ok(VERIFY_WRITE, request, nbytes))
30409 return -EFAULT;
30410- list = (struct drm_buf_pub *) (request + 1);
30411+ list = (struct drm_buf_pub __user *) (request + 1);
30412
30413 if (__put_user(count, &request->count)
30414 || __put_user(list, &request->list))
30415diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30416index 64a62c6..ceab35e 100644
30417--- a/drivers/gpu/drm/drm_ioctl.c
30418+++ b/drivers/gpu/drm/drm_ioctl.c
30419@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30420 stats->data[i].value =
30421 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30422 else
30423- stats->data[i].value = atomic_read(&dev->counts[i]);
30424+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30425 stats->data[i].type = dev->types[i];
30426 }
30427
30428diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30429index 5211520..c744d85 100644
30430--- a/drivers/gpu/drm/drm_lock.c
30431+++ b/drivers/gpu/drm/drm_lock.c
30432@@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30433 if (drm_lock_take(&master->lock, lock->context)) {
30434 master->lock.file_priv = file_priv;
30435 master->lock.lock_time = jiffies;
30436- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30437+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30438 break; /* Got lock */
30439 }
30440
30441@@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30442 return -EINVAL;
30443 }
30444
30445- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30446+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30447
30448 if (drm_lock_free(&master->lock, lock->context)) {
30449 /* FIXME: Should really bail out here. */
30450diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
30451index 21bcd4a..8e074e0 100644
30452--- a/drivers/gpu/drm/drm_stub.c
30453+++ b/drivers/gpu/drm/drm_stub.c
30454@@ -511,7 +511,7 @@ void drm_unplug_dev(struct drm_device *dev)
30455
30456 drm_device_set_unplugged(dev);
30457
30458- if (dev->open_count == 0) {
30459+ if (local_read(&dev->open_count) == 0) {
30460 drm_put_dev(dev);
30461 }
30462 mutex_unlock(&drm_global_mutex);
30463diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30464index fa94391..ed26ec8 100644
30465--- a/drivers/gpu/drm/i810/i810_dma.c
30466+++ b/drivers/gpu/drm/i810/i810_dma.c
30467@@ -943,8 +943,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30468 dma->buflist[vertex->idx],
30469 vertex->discard, vertex->used);
30470
30471- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30472- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30473+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30474+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30475 sarea_priv->last_enqueue = dev_priv->counter - 1;
30476 sarea_priv->last_dispatch = (int)hw_status[5];
30477
30478@@ -1104,8 +1104,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30479 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30480 mc->last_render);
30481
30482- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30483- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30484+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30485+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30486 sarea_priv->last_enqueue = dev_priv->counter - 1;
30487 sarea_priv->last_dispatch = (int)hw_status[5];
30488
30489diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30490index c9339f4..f5e1b9d 100644
30491--- a/drivers/gpu/drm/i810/i810_drv.h
30492+++ b/drivers/gpu/drm/i810/i810_drv.h
30493@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30494 int page_flipping;
30495
30496 wait_queue_head_t irq_queue;
30497- atomic_t irq_received;
30498- atomic_t irq_emitted;
30499+ atomic_unchecked_t irq_received;
30500+ atomic_unchecked_t irq_emitted;
30501
30502 int front_offset;
30503 } drm_i810_private_t;
30504diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30505index 5363e9c..59360d1 100644
30506--- a/drivers/gpu/drm/i915/i915_debugfs.c
30507+++ b/drivers/gpu/drm/i915/i915_debugfs.c
30508@@ -518,7 +518,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30509 I915_READ(GTIMR));
30510 }
30511 seq_printf(m, "Interrupts received: %d\n",
30512- atomic_read(&dev_priv->irq_received));
30513+ atomic_read_unchecked(&dev_priv->irq_received));
30514 for (i = 0; i < I915_NUM_RINGS; i++) {
30515 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30516 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30517diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30518index 36822b9..b725e1b 100644
30519--- a/drivers/gpu/drm/i915/i915_dma.c
30520+++ b/drivers/gpu/drm/i915/i915_dma.c
30521@@ -1266,7 +1266,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30522 bool can_switch;
30523
30524 spin_lock(&dev->count_lock);
30525- can_switch = (dev->open_count == 0);
30526+ can_switch = (local_read(&dev->open_count) == 0);
30527 spin_unlock(&dev->count_lock);
30528 return can_switch;
30529 }
30530diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30531index b0b676a..d107105 100644
30532--- a/drivers/gpu/drm/i915/i915_drv.h
30533+++ b/drivers/gpu/drm/i915/i915_drv.h
30534@@ -268,7 +268,7 @@ struct drm_i915_display_funcs {
30535 /* render clock increase/decrease */
30536 /* display clock increase/decrease */
30537 /* pll clock increase/decrease */
30538-};
30539+} __no_const;
30540
30541 struct intel_device_info {
30542 u8 gen;
30543@@ -386,7 +386,7 @@ typedef struct drm_i915_private {
30544 int current_page;
30545 int page_flipping;
30546
30547- atomic_t irq_received;
30548+ atomic_unchecked_t irq_received;
30549
30550 /* protects the irq masks */
30551 spinlock_t irq_lock;
30552@@ -985,7 +985,7 @@ struct drm_i915_gem_object {
30553 * will be page flipped away on the next vblank. When it
30554 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30555 */
30556- atomic_t pending_flip;
30557+ atomic_unchecked_t pending_flip;
30558 };
30559
30560 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30561@@ -1434,7 +1434,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
30562 struct drm_i915_private *dev_priv, unsigned port);
30563 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30564 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30565-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30566+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30567 {
30568 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30569 }
30570diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30571index 974a9f1..b3ebd45 100644
30572--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30573+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30574@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30575 i915_gem_clflush_object(obj);
30576
30577 if (obj->base.pending_write_domain)
30578- cd->flips |= atomic_read(&obj->pending_flip);
30579+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30580
30581 /* The actual obj->write_domain will be updated with
30582 * pending_write_domain after we emit the accumulated flush for all
30583@@ -916,9 +916,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30584
30585 static int
30586 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30587- int count)
30588+ unsigned int count)
30589 {
30590- int i;
30591+ unsigned int i;
30592
30593 for (i = 0; i < count; i++) {
30594 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30595diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30596index ed3224c..6618589 100644
30597--- a/drivers/gpu/drm/i915/i915_irq.c
30598+++ b/drivers/gpu/drm/i915/i915_irq.c
30599@@ -433,7 +433,7 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
30600 int vblank = 0;
30601 bool blc_event;
30602
30603- atomic_inc(&dev_priv->irq_received);
30604+ atomic_inc_unchecked(&dev_priv->irq_received);
30605
30606 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
30607 PIPE_VBLANK_INTERRUPT_STATUS;
30608@@ -586,7 +586,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30609 irqreturn_t ret = IRQ_NONE;
30610 int i;
30611
30612- atomic_inc(&dev_priv->irq_received);
30613+ atomic_inc_unchecked(&dev_priv->irq_received);
30614
30615 /* disable master interrupt before clearing iir */
30616 de_ier = I915_READ(DEIER);
30617@@ -661,7 +661,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30618 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30619 u32 hotplug_mask;
30620
30621- atomic_inc(&dev_priv->irq_received);
30622+ atomic_inc_unchecked(&dev_priv->irq_received);
30623
30624 /* disable master interrupt before clearing iir */
30625 de_ier = I915_READ(DEIER);
30626@@ -1646,7 +1646,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30627 {
30628 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30629
30630- atomic_set(&dev_priv->irq_received, 0);
30631+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30632
30633
30634 I915_WRITE(HWSTAM, 0xeffe);
30635@@ -1673,7 +1673,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
30636 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30637 int pipe;
30638
30639- atomic_set(&dev_priv->irq_received, 0);
30640+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30641
30642 /* VLV magic */
30643 I915_WRITE(VLV_IMR, 0);
30644@@ -1969,7 +1969,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
30645 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30646 int pipe;
30647
30648- atomic_set(&dev_priv->irq_received, 0);
30649+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30650
30651 for_each_pipe(pipe)
30652 I915_WRITE(PIPESTAT(pipe), 0);
30653@@ -2020,7 +2020,7 @@ static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
30654 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
30655 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
30656
30657- atomic_inc(&dev_priv->irq_received);
30658+ atomic_inc_unchecked(&dev_priv->irq_received);
30659
30660 iir = I915_READ16(IIR);
30661 if (iir == 0)
30662@@ -2105,7 +2105,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
30663 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30664 int pipe;
30665
30666- atomic_set(&dev_priv->irq_received, 0);
30667+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30668
30669 if (I915_HAS_HOTPLUG(dev)) {
30670 I915_WRITE(PORT_HOTPLUG_EN, 0);
30671@@ -2200,7 +2200,7 @@ static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
30672 };
30673 int pipe, ret = IRQ_NONE;
30674
30675- atomic_inc(&dev_priv->irq_received);
30676+ atomic_inc_unchecked(&dev_priv->irq_received);
30677
30678 iir = I915_READ(IIR);
30679 do {
30680@@ -2326,7 +2326,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
30681 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30682 int pipe;
30683
30684- atomic_set(&dev_priv->irq_received, 0);
30685+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30686
30687 if (I915_HAS_HOTPLUG(dev)) {
30688 I915_WRITE(PORT_HOTPLUG_EN, 0);
30689@@ -2436,7 +2436,7 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
30690 int irq_received;
30691 int ret = IRQ_NONE, pipe;
30692
30693- atomic_inc(&dev_priv->irq_received);
30694+ atomic_inc_unchecked(&dev_priv->irq_received);
30695
30696 iir = I915_READ(IIR);
30697
30698diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30699index 8a11131..46eeeaa 100644
30700--- a/drivers/gpu/drm/i915/intel_display.c
30701+++ b/drivers/gpu/drm/i915/intel_display.c
30702@@ -2000,7 +2000,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
30703
30704 wait_event(dev_priv->pending_flip_queue,
30705 atomic_read(&dev_priv->mm.wedged) ||
30706- atomic_read(&obj->pending_flip) == 0);
30707+ atomic_read_unchecked(&obj->pending_flip) == 0);
30708
30709 /* Big Hammer, we also need to ensure that any pending
30710 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30711@@ -5914,9 +5914,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30712
30713 obj = work->old_fb_obj;
30714
30715- atomic_clear_mask(1 << intel_crtc->plane,
30716- &obj->pending_flip.counter);
30717- if (atomic_read(&obj->pending_flip) == 0)
30718+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
30719+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
30720 wake_up(&dev_priv->pending_flip_queue);
30721
30722 schedule_work(&work->work);
30723@@ -6253,7 +6252,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30724 /* Block clients from rendering to the new back buffer until
30725 * the flip occurs and the object is no longer visible.
30726 */
30727- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30728+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30729
30730 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30731 if (ret)
30732@@ -6268,7 +6267,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30733 return 0;
30734
30735 cleanup_pending:
30736- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30737+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30738 drm_gem_object_unreference(&work->old_fb_obj->base);
30739 drm_gem_object_unreference(&obj->base);
30740 mutex_unlock(&dev->struct_mutex);
30741diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30742index 54558a0..2d97005 100644
30743--- a/drivers/gpu/drm/mga/mga_drv.h
30744+++ b/drivers/gpu/drm/mga/mga_drv.h
30745@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30746 u32 clear_cmd;
30747 u32 maccess;
30748
30749- atomic_t vbl_received; /**< Number of vblanks received. */
30750+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30751 wait_queue_head_t fence_queue;
30752- atomic_t last_fence_retired;
30753+ atomic_unchecked_t last_fence_retired;
30754 u32 next_fence_to_post;
30755
30756 unsigned int fb_cpp;
30757diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30758index 2581202..f230a8d9 100644
30759--- a/drivers/gpu/drm/mga/mga_irq.c
30760+++ b/drivers/gpu/drm/mga/mga_irq.c
30761@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30762 if (crtc != 0)
30763 return 0;
30764
30765- return atomic_read(&dev_priv->vbl_received);
30766+ return atomic_read_unchecked(&dev_priv->vbl_received);
30767 }
30768
30769
30770@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30771 /* VBLANK interrupt */
30772 if (status & MGA_VLINEPEN) {
30773 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30774- atomic_inc(&dev_priv->vbl_received);
30775+ atomic_inc_unchecked(&dev_priv->vbl_received);
30776 drm_handle_vblank(dev, 0);
30777 handled = 1;
30778 }
30779@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30780 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30781 MGA_WRITE(MGA_PRIMEND, prim_end);
30782
30783- atomic_inc(&dev_priv->last_fence_retired);
30784+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
30785 DRM_WAKEUP(&dev_priv->fence_queue);
30786 handled = 1;
30787 }
30788@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30789 * using fences.
30790 */
30791 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30792- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30793+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30794 - *sequence) <= (1 << 23)));
30795
30796 *sequence = cur_fence;
30797diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30798index 2f11e16..191267e 100644
30799--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30800+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30801@@ -5340,7 +5340,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30802 struct bit_table {
30803 const char id;
30804 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30805-};
30806+} __no_const;
30807
30808 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30809
30810diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30811index b863a3a..c55e0dc 100644
30812--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30813+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30814@@ -302,7 +302,7 @@ struct nouveau_exec_engine {
30815 u32 handle, u16 class);
30816 void (*set_tile_region)(struct drm_device *dev, int i);
30817 void (*tlb_flush)(struct drm_device *, int engine);
30818-};
30819+} __no_const;
30820
30821 struct nouveau_instmem_engine {
30822 void *priv;
30823@@ -324,13 +324,13 @@ struct nouveau_instmem_engine {
30824 struct nouveau_mc_engine {
30825 int (*init)(struct drm_device *dev);
30826 void (*takedown)(struct drm_device *dev);
30827-};
30828+} __no_const;
30829
30830 struct nouveau_timer_engine {
30831 int (*init)(struct drm_device *dev);
30832 void (*takedown)(struct drm_device *dev);
30833 uint64_t (*read)(struct drm_device *dev);
30834-};
30835+} __no_const;
30836
30837 struct nouveau_fb_engine {
30838 int num_tiles;
30839@@ -547,7 +547,7 @@ struct nouveau_vram_engine {
30840 void (*put)(struct drm_device *, struct nouveau_mem **);
30841
30842 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30843-};
30844+} __no_const;
30845
30846 struct nouveau_engine {
30847 struct nouveau_instmem_engine instmem;
30848@@ -693,7 +693,7 @@ struct drm_nouveau_private {
30849 struct drm_global_reference mem_global_ref;
30850 struct ttm_bo_global_ref bo_global_ref;
30851 struct ttm_bo_device bdev;
30852- atomic_t validate_sequence;
30853+ atomic_unchecked_t validate_sequence;
30854 int (*move)(struct nouveau_channel *,
30855 struct ttm_buffer_object *,
30856 struct ttm_mem_reg *, struct ttm_mem_reg *);
30857diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30858index 30f5423..abca136 100644
30859--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30860+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30861@@ -319,7 +319,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30862 int trycnt = 0;
30863 int ret, i;
30864
30865- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30866+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30867 retry:
30868 if (++trycnt > 100000) {
30869 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30870diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30871index 19706f0..f257368 100644
30872--- a/drivers/gpu/drm/nouveau/nouveau_state.c
30873+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30874@@ -490,7 +490,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30875 bool can_switch;
30876
30877 spin_lock(&dev->count_lock);
30878- can_switch = (dev->open_count == 0);
30879+ can_switch = (local_read(&dev->open_count) == 0);
30880 spin_unlock(&dev->count_lock);
30881 return can_switch;
30882 }
30883diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
30884index a9514ea..369d511 100644
30885--- a/drivers/gpu/drm/nouveau/nv50_sor.c
30886+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
30887@@ -304,7 +304,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
30888 }
30889
30890 if (nv_encoder->dcb->type == OUTPUT_DP) {
30891- struct dp_train_func func = {
30892+ static struct dp_train_func func = {
30893 .link_set = nv50_sor_dp_link_set,
30894 .train_set = nv50_sor_dp_train_set,
30895 .train_adj = nv50_sor_dp_train_adj
30896diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
30897index c50b075..6b07dfc 100644
30898--- a/drivers/gpu/drm/nouveau/nvd0_display.c
30899+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
30900@@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
30901 nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
30902
30903 if (nv_encoder->dcb->type == OUTPUT_DP) {
30904- struct dp_train_func func = {
30905+ static struct dp_train_func func = {
30906 .link_set = nvd0_sor_dp_link_set,
30907 .train_set = nvd0_sor_dp_train_set,
30908 .train_adj = nvd0_sor_dp_train_adj
30909diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30910index bcac90b..53bfc76 100644
30911--- a/drivers/gpu/drm/r128/r128_cce.c
30912+++ b/drivers/gpu/drm/r128/r128_cce.c
30913@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30914
30915 /* GH: Simple idle check.
30916 */
30917- atomic_set(&dev_priv->idle_count, 0);
30918+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30919
30920 /* We don't support anything other than bus-mastering ring mode,
30921 * but the ring can be in either AGP or PCI space for the ring
30922diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30923index 930c71b..499aded 100644
30924--- a/drivers/gpu/drm/r128/r128_drv.h
30925+++ b/drivers/gpu/drm/r128/r128_drv.h
30926@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30927 int is_pci;
30928 unsigned long cce_buffers_offset;
30929
30930- atomic_t idle_count;
30931+ atomic_unchecked_t idle_count;
30932
30933 int page_flipping;
30934 int current_page;
30935 u32 crtc_offset;
30936 u32 crtc_offset_cntl;
30937
30938- atomic_t vbl_received;
30939+ atomic_unchecked_t vbl_received;
30940
30941 u32 color_fmt;
30942 unsigned int front_offset;
30943diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30944index 429d5a0..7e899ed 100644
30945--- a/drivers/gpu/drm/r128/r128_irq.c
30946+++ b/drivers/gpu/drm/r128/r128_irq.c
30947@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30948 if (crtc != 0)
30949 return 0;
30950
30951- return atomic_read(&dev_priv->vbl_received);
30952+ return atomic_read_unchecked(&dev_priv->vbl_received);
30953 }
30954
30955 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30956@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30957 /* VBLANK interrupt */
30958 if (status & R128_CRTC_VBLANK_INT) {
30959 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30960- atomic_inc(&dev_priv->vbl_received);
30961+ atomic_inc_unchecked(&dev_priv->vbl_received);
30962 drm_handle_vblank(dev, 0);
30963 return IRQ_HANDLED;
30964 }
30965diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30966index a9e33ce..09edd4b 100644
30967--- a/drivers/gpu/drm/r128/r128_state.c
30968+++ b/drivers/gpu/drm/r128/r128_state.c
30969@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30970
30971 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30972 {
30973- if (atomic_read(&dev_priv->idle_count) == 0)
30974+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30975 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30976 else
30977- atomic_set(&dev_priv->idle_count, 0);
30978+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30979 }
30980
30981 #endif
30982diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30983index 5a82b6b..9e69c73 100644
30984--- a/drivers/gpu/drm/radeon/mkregtable.c
30985+++ b/drivers/gpu/drm/radeon/mkregtable.c
30986@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30987 regex_t mask_rex;
30988 regmatch_t match[4];
30989 char buf[1024];
30990- size_t end;
30991+ long end;
30992 int len;
30993 int done = 0;
30994 int r;
30995 unsigned o;
30996 struct offset *offset;
30997 char last_reg_s[10];
30998- int last_reg;
30999+ unsigned long last_reg;
31000
31001 if (regcomp
31002 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
31003diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
31004index fefcca5..20a5b90 100644
31005--- a/drivers/gpu/drm/radeon/radeon.h
31006+++ b/drivers/gpu/drm/radeon/radeon.h
31007@@ -743,7 +743,7 @@ struct r600_blit_cp_primitives {
31008 int x2, int y2);
31009 void (*draw_auto)(struct radeon_device *rdev);
31010 void (*set_default_state)(struct radeon_device *rdev);
31011-};
31012+} __no_const;
31013
31014 struct r600_blit {
31015 struct radeon_bo *shader_obj;
31016@@ -1244,7 +1244,7 @@ struct radeon_asic {
31017 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
31018 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
31019 } pflip;
31020-};
31021+} __no_const;
31022
31023 /*
31024 * Asic structures
31025diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
31026index 066c98b..96ab858 100644
31027--- a/drivers/gpu/drm/radeon/radeon_device.c
31028+++ b/drivers/gpu/drm/radeon/radeon_device.c
31029@@ -692,7 +692,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
31030 bool can_switch;
31031
31032 spin_lock(&dev->count_lock);
31033- can_switch = (dev->open_count == 0);
31034+ can_switch = (local_read(&dev->open_count) == 0);
31035 spin_unlock(&dev->count_lock);
31036 return can_switch;
31037 }
31038diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
31039index a1b59ca..86f2d44 100644
31040--- a/drivers/gpu/drm/radeon/radeon_drv.h
31041+++ b/drivers/gpu/drm/radeon/radeon_drv.h
31042@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
31043
31044 /* SW interrupt */
31045 wait_queue_head_t swi_queue;
31046- atomic_t swi_emitted;
31047+ atomic_unchecked_t swi_emitted;
31048 int vblank_crtc;
31049 uint32_t irq_enable_reg;
31050 uint32_t r500_disp_irq_reg;
31051diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
31052index 48b7cea..342236f 100644
31053--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
31054+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
31055@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
31056 request = compat_alloc_user_space(sizeof(*request));
31057 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
31058 || __put_user(req32.param, &request->param)
31059- || __put_user((void __user *)(unsigned long)req32.value,
31060+ || __put_user((unsigned long)req32.value,
31061 &request->value))
31062 return -EFAULT;
31063
31064diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
31065index 00da384..32f972d 100644
31066--- a/drivers/gpu/drm/radeon/radeon_irq.c
31067+++ b/drivers/gpu/drm/radeon/radeon_irq.c
31068@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
31069 unsigned int ret;
31070 RING_LOCALS;
31071
31072- atomic_inc(&dev_priv->swi_emitted);
31073- ret = atomic_read(&dev_priv->swi_emitted);
31074+ atomic_inc_unchecked(&dev_priv->swi_emitted);
31075+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
31076
31077 BEGIN_RING(4);
31078 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
31079@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
31080 drm_radeon_private_t *dev_priv =
31081 (drm_radeon_private_t *) dev->dev_private;
31082
31083- atomic_set(&dev_priv->swi_emitted, 0);
31084+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
31085 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
31086
31087 dev->max_vblank_count = 0x001fffff;
31088diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
31089index e8422ae..d22d4a8 100644
31090--- a/drivers/gpu/drm/radeon/radeon_state.c
31091+++ b/drivers/gpu/drm/radeon/radeon_state.c
31092@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
31093 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
31094 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
31095
31096- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31097+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31098 sarea_priv->nbox * sizeof(depth_boxes[0])))
31099 return -EFAULT;
31100
31101@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
31102 {
31103 drm_radeon_private_t *dev_priv = dev->dev_private;
31104 drm_radeon_getparam_t *param = data;
31105- int value;
31106+ int value = 0;
31107
31108 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
31109
31110diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
31111index c94a225..5795d34 100644
31112--- a/drivers/gpu/drm/radeon/radeon_ttm.c
31113+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
31114@@ -852,8 +852,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
31115 }
31116 if (unlikely(ttm_vm_ops == NULL)) {
31117 ttm_vm_ops = vma->vm_ops;
31118- radeon_ttm_vm_ops = *ttm_vm_ops;
31119- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31120+ pax_open_kernel();
31121+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
31122+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31123+ pax_close_kernel();
31124 }
31125 vma->vm_ops = &radeon_ttm_vm_ops;
31126 return 0;
31127diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
31128index 159b6a4..fa82487 100644
31129--- a/drivers/gpu/drm/radeon/rs690.c
31130+++ b/drivers/gpu/drm/radeon/rs690.c
31131@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
31132 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
31133 rdev->pm.sideport_bandwidth.full)
31134 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
31135- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
31136+ read_delay_latency.full = dfixed_const(800 * 1000);
31137 read_delay_latency.full = dfixed_div(read_delay_latency,
31138 rdev->pm.igp_sideport_mclk);
31139+ a.full = dfixed_const(370);
31140+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
31141 } else {
31142 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
31143 rdev->pm.k8_bandwidth.full)
31144diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31145index ebc6fac..a8313ed 100644
31146--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
31147+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31148@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
31149 static int ttm_pool_mm_shrink(struct shrinker *shrink,
31150 struct shrink_control *sc)
31151 {
31152- static atomic_t start_pool = ATOMIC_INIT(0);
31153+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
31154 unsigned i;
31155- unsigned pool_offset = atomic_add_return(1, &start_pool);
31156+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
31157 struct ttm_page_pool *pool;
31158 int shrink_pages = sc->nr_to_scan;
31159
31160diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
31161index 88edacc..1e5412b 100644
31162--- a/drivers/gpu/drm/via/via_drv.h
31163+++ b/drivers/gpu/drm/via/via_drv.h
31164@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
31165 typedef uint32_t maskarray_t[5];
31166
31167 typedef struct drm_via_irq {
31168- atomic_t irq_received;
31169+ atomic_unchecked_t irq_received;
31170 uint32_t pending_mask;
31171 uint32_t enable_mask;
31172 wait_queue_head_t irq_queue;
31173@@ -75,7 +75,7 @@ typedef struct drm_via_private {
31174 struct timeval last_vblank;
31175 int last_vblank_valid;
31176 unsigned usec_per_vblank;
31177- atomic_t vbl_received;
31178+ atomic_unchecked_t vbl_received;
31179 drm_via_state_t hc_state;
31180 char pci_buf[VIA_PCI_BUF_SIZE];
31181 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
31182diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
31183index d391f48..10c8ca3 100644
31184--- a/drivers/gpu/drm/via/via_irq.c
31185+++ b/drivers/gpu/drm/via/via_irq.c
31186@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
31187 if (crtc != 0)
31188 return 0;
31189
31190- return atomic_read(&dev_priv->vbl_received);
31191+ return atomic_read_unchecked(&dev_priv->vbl_received);
31192 }
31193
31194 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31195@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31196
31197 status = VIA_READ(VIA_REG_INTERRUPT);
31198 if (status & VIA_IRQ_VBLANK_PENDING) {
31199- atomic_inc(&dev_priv->vbl_received);
31200- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
31201+ atomic_inc_unchecked(&dev_priv->vbl_received);
31202+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
31203 do_gettimeofday(&cur_vblank);
31204 if (dev_priv->last_vblank_valid) {
31205 dev_priv->usec_per_vblank =
31206@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31207 dev_priv->last_vblank = cur_vblank;
31208 dev_priv->last_vblank_valid = 1;
31209 }
31210- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
31211+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31212 DRM_DEBUG("US per vblank is: %u\n",
31213 dev_priv->usec_per_vblank);
31214 }
31215@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31216
31217 for (i = 0; i < dev_priv->num_irqs; ++i) {
31218 if (status & cur_irq->pending_mask) {
31219- atomic_inc(&cur_irq->irq_received);
31220+ atomic_inc_unchecked(&cur_irq->irq_received);
31221 DRM_WAKEUP(&cur_irq->irq_queue);
31222 handled = 1;
31223 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
31224@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
31225 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31226 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31227 masks[irq][4]));
31228- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31229+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31230 } else {
31231 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31232 (((cur_irq_sequence =
31233- atomic_read(&cur_irq->irq_received)) -
31234+ atomic_read_unchecked(&cur_irq->irq_received)) -
31235 *sequence) <= (1 << 23)));
31236 }
31237 *sequence = cur_irq_sequence;
31238@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
31239 }
31240
31241 for (i = 0; i < dev_priv->num_irqs; ++i) {
31242- atomic_set(&cur_irq->irq_received, 0);
31243+ atomic_set_unchecked(&cur_irq->irq_received, 0);
31244 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31245 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31246 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31247@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
31248 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31249 case VIA_IRQ_RELATIVE:
31250 irqwait->request.sequence +=
31251- atomic_read(&cur_irq->irq_received);
31252+ atomic_read_unchecked(&cur_irq->irq_received);
31253 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31254 case VIA_IRQ_ABSOLUTE:
31255 break;
31256diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31257index d0f2c07..9ebd9c3 100644
31258--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31259+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31260@@ -263,7 +263,7 @@ struct vmw_private {
31261 * Fencing and IRQs.
31262 */
31263
31264- atomic_t marker_seq;
31265+ atomic_unchecked_t marker_seq;
31266 wait_queue_head_t fence_queue;
31267 wait_queue_head_t fifo_queue;
31268 int fence_queue_waiters; /* Protected by hw_mutex */
31269diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31270index a0c2f12..68ae6cb 100644
31271--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31272+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31273@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31274 (unsigned int) min,
31275 (unsigned int) fifo->capabilities);
31276
31277- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31278+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31279 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31280 vmw_marker_queue_init(&fifo->marker_queue);
31281 return vmw_fifo_send_fence(dev_priv, &dummy);
31282@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31283 if (reserveable)
31284 iowrite32(bytes, fifo_mem +
31285 SVGA_FIFO_RESERVED);
31286- return fifo_mem + (next_cmd >> 2);
31287+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31288 } else {
31289 need_bounce = true;
31290 }
31291@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31292
31293 fm = vmw_fifo_reserve(dev_priv, bytes);
31294 if (unlikely(fm == NULL)) {
31295- *seqno = atomic_read(&dev_priv->marker_seq);
31296+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31297 ret = -ENOMEM;
31298 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31299 false, 3*HZ);
31300@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31301 }
31302
31303 do {
31304- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31305+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31306 } while (*seqno == 0);
31307
31308 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31309diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31310index cabc95f..14b3d77 100644
31311--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31312+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31313@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31314 * emitted. Then the fence is stale and signaled.
31315 */
31316
31317- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31318+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31319 > VMW_FENCE_WRAP);
31320
31321 return ret;
31322@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31323
31324 if (fifo_idle)
31325 down_read(&fifo_state->rwsem);
31326- signal_seq = atomic_read(&dev_priv->marker_seq);
31327+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31328 ret = 0;
31329
31330 for (;;) {
31331diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31332index 8a8725c..afed796 100644
31333--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31334+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31335@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31336 while (!vmw_lag_lt(queue, us)) {
31337 spin_lock(&queue->lock);
31338 if (list_empty(&queue->head))
31339- seqno = atomic_read(&dev_priv->marker_seq);
31340+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31341 else {
31342 marker = list_first_entry(&queue->head,
31343 struct vmw_marker, head);
31344diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31345index 1f6957c..b579481 100644
31346--- a/drivers/hid/hid-core.c
31347+++ b/drivers/hid/hid-core.c
31348@@ -2153,7 +2153,7 @@ static bool hid_ignore(struct hid_device *hdev)
31349
31350 int hid_add_device(struct hid_device *hdev)
31351 {
31352- static atomic_t id = ATOMIC_INIT(0);
31353+ static atomic_unchecked_t id = ATOMIC_INIT(0);
31354 int ret;
31355
31356 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31357@@ -2188,7 +2188,7 @@ int hid_add_device(struct hid_device *hdev)
31358 /* XXX hack, any other cleaner solution after the driver core
31359 * is converted to allow more than 20 bytes as the device name? */
31360 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31361- hdev->vendor, hdev->product, atomic_inc_return(&id));
31362+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31363
31364 hid_debug_register(hdev, dev_name(&hdev->dev));
31365 ret = device_add(&hdev->dev);
31366diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
31367index eec3291..8ed706b 100644
31368--- a/drivers/hid/hid-wiimote-debug.c
31369+++ b/drivers/hid/hid-wiimote-debug.c
31370@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
31371 else if (size == 0)
31372 return -EIO;
31373
31374- if (copy_to_user(u, buf, size))
31375+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
31376 return -EFAULT;
31377
31378 *off += size;
31379diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31380index 14599e2..711c965 100644
31381--- a/drivers/hid/usbhid/hiddev.c
31382+++ b/drivers/hid/usbhid/hiddev.c
31383@@ -625,7 +625,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31384 break;
31385
31386 case HIDIOCAPPLICATION:
31387- if (arg < 0 || arg >= hid->maxapplication)
31388+ if (arg >= hid->maxapplication)
31389 break;
31390
31391 for (i = 0; i < hid->maxcollection; i++)
31392diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31393index 4065374..10ed7dc 100644
31394--- a/drivers/hv/channel.c
31395+++ b/drivers/hv/channel.c
31396@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31397 int ret = 0;
31398 int t;
31399
31400- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31401- atomic_inc(&vmbus_connection.next_gpadl_handle);
31402+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31403+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31404
31405 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31406 if (ret)
31407diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31408index 86f8885..ab9cb2b 100644
31409--- a/drivers/hv/hv.c
31410+++ b/drivers/hv/hv.c
31411@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31412 u64 output_address = (output) ? virt_to_phys(output) : 0;
31413 u32 output_address_hi = output_address >> 32;
31414 u32 output_address_lo = output_address & 0xFFFFFFFF;
31415- void *hypercall_page = hv_context.hypercall_page;
31416+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31417
31418 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31419 "=a"(hv_status_lo) : "d" (control_hi),
31420diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31421index b9426a6..677ce34 100644
31422--- a/drivers/hv/hyperv_vmbus.h
31423+++ b/drivers/hv/hyperv_vmbus.h
31424@@ -555,7 +555,7 @@ enum vmbus_connect_state {
31425 struct vmbus_connection {
31426 enum vmbus_connect_state conn_state;
31427
31428- atomic_t next_gpadl_handle;
31429+ atomic_unchecked_t next_gpadl_handle;
31430
31431 /*
31432 * Represents channel interrupts. Each bit position represents a
31433diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31434index a220e57..428f54d 100644
31435--- a/drivers/hv/vmbus_drv.c
31436+++ b/drivers/hv/vmbus_drv.c
31437@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31438 {
31439 int ret = 0;
31440
31441- static atomic_t device_num = ATOMIC_INIT(0);
31442+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31443
31444 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31445- atomic_inc_return(&device_num));
31446+ atomic_inc_return_unchecked(&device_num));
31447
31448 child_device_obj->device.bus = &hv_bus;
31449 child_device_obj->device.parent = &hv_acpi_dev->dev;
31450diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31451index 34ad5a2..e2b0ae8 100644
31452--- a/drivers/hwmon/acpi_power_meter.c
31453+++ b/drivers/hwmon/acpi_power_meter.c
31454@@ -308,8 +308,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31455 return res;
31456
31457 temp /= 1000;
31458- if (temp < 0)
31459- return -EINVAL;
31460
31461 mutex_lock(&resource->lock);
31462 resource->trip[attr->index - 7] = temp;
31463diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31464index 8b011d0..3de24a1 100644
31465--- a/drivers/hwmon/sht15.c
31466+++ b/drivers/hwmon/sht15.c
31467@@ -166,7 +166,7 @@ struct sht15_data {
31468 int supply_uV;
31469 bool supply_uV_valid;
31470 struct work_struct update_supply_work;
31471- atomic_t interrupt_handled;
31472+ atomic_unchecked_t interrupt_handled;
31473 };
31474
31475 /**
31476@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31477 return ret;
31478
31479 gpio_direction_input(data->pdata->gpio_data);
31480- atomic_set(&data->interrupt_handled, 0);
31481+ atomic_set_unchecked(&data->interrupt_handled, 0);
31482
31483 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31484 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31485 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31486 /* Only relevant if the interrupt hasn't occurred. */
31487- if (!atomic_read(&data->interrupt_handled))
31488+ if (!atomic_read_unchecked(&data->interrupt_handled))
31489 schedule_work(&data->read_work);
31490 }
31491 ret = wait_event_timeout(data->wait_queue,
31492@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31493
31494 /* First disable the interrupt */
31495 disable_irq_nosync(irq);
31496- atomic_inc(&data->interrupt_handled);
31497+ atomic_inc_unchecked(&data->interrupt_handled);
31498 /* Then schedule a reading work struct */
31499 if (data->state != SHT15_READING_NOTHING)
31500 schedule_work(&data->read_work);
31501@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31502 * If not, then start the interrupt again - care here as could
31503 * have gone low in meantime so verify it hasn't!
31504 */
31505- atomic_set(&data->interrupt_handled, 0);
31506+ atomic_set_unchecked(&data->interrupt_handled, 0);
31507 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31508 /* If still not occurred or another handler was scheduled */
31509 if (gpio_get_value(data->pdata->gpio_data)
31510- || atomic_read(&data->interrupt_handled))
31511+ || atomic_read_unchecked(&data->interrupt_handled))
31512 return;
31513 }
31514
31515diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31516index 378fcb5..5e91fa8 100644
31517--- a/drivers/i2c/busses/i2c-amd756-s4882.c
31518+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31519@@ -43,7 +43,7 @@
31520 extern struct i2c_adapter amd756_smbus;
31521
31522 static struct i2c_adapter *s4882_adapter;
31523-static struct i2c_algorithm *s4882_algo;
31524+static i2c_algorithm_no_const *s4882_algo;
31525
31526 /* Wrapper access functions for multiplexed SMBus */
31527 static DEFINE_MUTEX(amd756_lock);
31528diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31529index 29015eb..af2d8e9 100644
31530--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31531+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31532@@ -41,7 +41,7 @@
31533 extern struct i2c_adapter *nforce2_smbus;
31534
31535 static struct i2c_adapter *s4985_adapter;
31536-static struct i2c_algorithm *s4985_algo;
31537+static i2c_algorithm_no_const *s4985_algo;
31538
31539 /* Wrapper access functions for multiplexed SMBus */
31540 static DEFINE_MUTEX(nforce2_lock);
31541diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31542index 1038c38..eb92f51 100644
31543--- a/drivers/i2c/i2c-mux.c
31544+++ b/drivers/i2c/i2c-mux.c
31545@@ -30,7 +30,7 @@
31546 /* multiplexer per channel data */
31547 struct i2c_mux_priv {
31548 struct i2c_adapter adap;
31549- struct i2c_algorithm algo;
31550+ i2c_algorithm_no_const algo;
31551
31552 struct i2c_adapter *parent;
31553 void *mux_priv; /* the mux chip/device */
31554diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31555index 57d00ca..0145194 100644
31556--- a/drivers/ide/aec62xx.c
31557+++ b/drivers/ide/aec62xx.c
31558@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31559 .cable_detect = atp86x_cable_detect,
31560 };
31561
31562-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31563+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31564 { /* 0: AEC6210 */
31565 .name = DRV_NAME,
31566 .init_chipset = init_chipset_aec62xx,
31567diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31568index 2c8016a..911a27c 100644
31569--- a/drivers/ide/alim15x3.c
31570+++ b/drivers/ide/alim15x3.c
31571@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31572 .dma_sff_read_status = ide_dma_sff_read_status,
31573 };
31574
31575-static const struct ide_port_info ali15x3_chipset __devinitdata = {
31576+static const struct ide_port_info ali15x3_chipset __devinitconst = {
31577 .name = DRV_NAME,
31578 .init_chipset = init_chipset_ali15x3,
31579 .init_hwif = init_hwif_ali15x3,
31580diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31581index 3747b25..56fc995 100644
31582--- a/drivers/ide/amd74xx.c
31583+++ b/drivers/ide/amd74xx.c
31584@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31585 .udma_mask = udma, \
31586 }
31587
31588-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31589+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31590 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31591 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31592 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31593diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31594index 15f0ead..cb43480 100644
31595--- a/drivers/ide/atiixp.c
31596+++ b/drivers/ide/atiixp.c
31597@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31598 .cable_detect = atiixp_cable_detect,
31599 };
31600
31601-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31602+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31603 { /* 0: IXP200/300/400/700 */
31604 .name = DRV_NAME,
31605 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31606diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31607index 5f80312..d1fc438 100644
31608--- a/drivers/ide/cmd64x.c
31609+++ b/drivers/ide/cmd64x.c
31610@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31611 .dma_sff_read_status = ide_dma_sff_read_status,
31612 };
31613
31614-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31615+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31616 { /* 0: CMD643 */
31617 .name = DRV_NAME,
31618 .init_chipset = init_chipset_cmd64x,
31619diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31620index 2c1e5f7..1444762 100644
31621--- a/drivers/ide/cs5520.c
31622+++ b/drivers/ide/cs5520.c
31623@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31624 .set_dma_mode = cs5520_set_dma_mode,
31625 };
31626
31627-static const struct ide_port_info cyrix_chipset __devinitdata = {
31628+static const struct ide_port_info cyrix_chipset __devinitconst = {
31629 .name = DRV_NAME,
31630 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31631 .port_ops = &cs5520_port_ops,
31632diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31633index 4dc4eb9..49b40ad 100644
31634--- a/drivers/ide/cs5530.c
31635+++ b/drivers/ide/cs5530.c
31636@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31637 .udma_filter = cs5530_udma_filter,
31638 };
31639
31640-static const struct ide_port_info cs5530_chipset __devinitdata = {
31641+static const struct ide_port_info cs5530_chipset __devinitconst = {
31642 .name = DRV_NAME,
31643 .init_chipset = init_chipset_cs5530,
31644 .init_hwif = init_hwif_cs5530,
31645diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31646index 5059faf..18d4c85 100644
31647--- a/drivers/ide/cs5535.c
31648+++ b/drivers/ide/cs5535.c
31649@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31650 .cable_detect = cs5535_cable_detect,
31651 };
31652
31653-static const struct ide_port_info cs5535_chipset __devinitdata = {
31654+static const struct ide_port_info cs5535_chipset __devinitconst = {
31655 .name = DRV_NAME,
31656 .port_ops = &cs5535_port_ops,
31657 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31658diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31659index 847553f..3ffb49d 100644
31660--- a/drivers/ide/cy82c693.c
31661+++ b/drivers/ide/cy82c693.c
31662@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31663 .set_dma_mode = cy82c693_set_dma_mode,
31664 };
31665
31666-static const struct ide_port_info cy82c693_chipset __devinitdata = {
31667+static const struct ide_port_info cy82c693_chipset __devinitconst = {
31668 .name = DRV_NAME,
31669 .init_iops = init_iops_cy82c693,
31670 .port_ops = &cy82c693_port_ops,
31671diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31672index 58c51cd..4aec3b8 100644
31673--- a/drivers/ide/hpt366.c
31674+++ b/drivers/ide/hpt366.c
31675@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31676 }
31677 };
31678
31679-static const struct hpt_info hpt36x __devinitdata = {
31680+static const struct hpt_info hpt36x __devinitconst = {
31681 .chip_name = "HPT36x",
31682 .chip_type = HPT36x,
31683 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31684@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31685 .timings = &hpt36x_timings
31686 };
31687
31688-static const struct hpt_info hpt370 __devinitdata = {
31689+static const struct hpt_info hpt370 __devinitconst = {
31690 .chip_name = "HPT370",
31691 .chip_type = HPT370,
31692 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31693@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31694 .timings = &hpt37x_timings
31695 };
31696
31697-static const struct hpt_info hpt370a __devinitdata = {
31698+static const struct hpt_info hpt370a __devinitconst = {
31699 .chip_name = "HPT370A",
31700 .chip_type = HPT370A,
31701 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31702@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31703 .timings = &hpt37x_timings
31704 };
31705
31706-static const struct hpt_info hpt374 __devinitdata = {
31707+static const struct hpt_info hpt374 __devinitconst = {
31708 .chip_name = "HPT374",
31709 .chip_type = HPT374,
31710 .udma_mask = ATA_UDMA5,
31711@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31712 .timings = &hpt37x_timings
31713 };
31714
31715-static const struct hpt_info hpt372 __devinitdata = {
31716+static const struct hpt_info hpt372 __devinitconst = {
31717 .chip_name = "HPT372",
31718 .chip_type = HPT372,
31719 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31720@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31721 .timings = &hpt37x_timings
31722 };
31723
31724-static const struct hpt_info hpt372a __devinitdata = {
31725+static const struct hpt_info hpt372a __devinitconst = {
31726 .chip_name = "HPT372A",
31727 .chip_type = HPT372A,
31728 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31729@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31730 .timings = &hpt37x_timings
31731 };
31732
31733-static const struct hpt_info hpt302 __devinitdata = {
31734+static const struct hpt_info hpt302 __devinitconst = {
31735 .chip_name = "HPT302",
31736 .chip_type = HPT302,
31737 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31738@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31739 .timings = &hpt37x_timings
31740 };
31741
31742-static const struct hpt_info hpt371 __devinitdata = {
31743+static const struct hpt_info hpt371 __devinitconst = {
31744 .chip_name = "HPT371",
31745 .chip_type = HPT371,
31746 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31747@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31748 .timings = &hpt37x_timings
31749 };
31750
31751-static const struct hpt_info hpt372n __devinitdata = {
31752+static const struct hpt_info hpt372n __devinitconst = {
31753 .chip_name = "HPT372N",
31754 .chip_type = HPT372N,
31755 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31756@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31757 .timings = &hpt37x_timings
31758 };
31759
31760-static const struct hpt_info hpt302n __devinitdata = {
31761+static const struct hpt_info hpt302n __devinitconst = {
31762 .chip_name = "HPT302N",
31763 .chip_type = HPT302N,
31764 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31765@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31766 .timings = &hpt37x_timings
31767 };
31768
31769-static const struct hpt_info hpt371n __devinitdata = {
31770+static const struct hpt_info hpt371n __devinitconst = {
31771 .chip_name = "HPT371N",
31772 .chip_type = HPT371N,
31773 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31774@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31775 .dma_sff_read_status = ide_dma_sff_read_status,
31776 };
31777
31778-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31779+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31780 { /* 0: HPT36x */
31781 .name = DRV_NAME,
31782 .init_chipset = init_chipset_hpt366,
31783diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31784index 8126824..55a2798 100644
31785--- a/drivers/ide/ide-cd.c
31786+++ b/drivers/ide/ide-cd.c
31787@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31788 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31789 if ((unsigned long)buf & alignment
31790 || blk_rq_bytes(rq) & q->dma_pad_mask
31791- || object_is_on_stack(buf))
31792+ || object_starts_on_stack(buf))
31793 drive->dma = 0;
31794 }
31795 }
31796diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31797index 7f56b73..dab5b67 100644
31798--- a/drivers/ide/ide-pci-generic.c
31799+++ b/drivers/ide/ide-pci-generic.c
31800@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31801 .udma_mask = ATA_UDMA6, \
31802 }
31803
31804-static const struct ide_port_info generic_chipsets[] __devinitdata = {
31805+static const struct ide_port_info generic_chipsets[] __devinitconst = {
31806 /* 0: Unknown */
31807 DECLARE_GENERIC_PCI_DEV(0),
31808
31809diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31810index 560e66d..d5dd180 100644
31811--- a/drivers/ide/it8172.c
31812+++ b/drivers/ide/it8172.c
31813@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31814 .set_dma_mode = it8172_set_dma_mode,
31815 };
31816
31817-static const struct ide_port_info it8172_port_info __devinitdata = {
31818+static const struct ide_port_info it8172_port_info __devinitconst = {
31819 .name = DRV_NAME,
31820 .port_ops = &it8172_port_ops,
31821 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31822diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31823index 46816ba..1847aeb 100644
31824--- a/drivers/ide/it8213.c
31825+++ b/drivers/ide/it8213.c
31826@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31827 .cable_detect = it8213_cable_detect,
31828 };
31829
31830-static const struct ide_port_info it8213_chipset __devinitdata = {
31831+static const struct ide_port_info it8213_chipset __devinitconst = {
31832 .name = DRV_NAME,
31833 .enablebits = { {0x41, 0x80, 0x80} },
31834 .port_ops = &it8213_port_ops,
31835diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31836index 2e3169f..c5611db 100644
31837--- a/drivers/ide/it821x.c
31838+++ b/drivers/ide/it821x.c
31839@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31840 .cable_detect = it821x_cable_detect,
31841 };
31842
31843-static const struct ide_port_info it821x_chipset __devinitdata = {
31844+static const struct ide_port_info it821x_chipset __devinitconst = {
31845 .name = DRV_NAME,
31846 .init_chipset = init_chipset_it821x,
31847 .init_hwif = init_hwif_it821x,
31848diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31849index 74c2c4a..efddd7d 100644
31850--- a/drivers/ide/jmicron.c
31851+++ b/drivers/ide/jmicron.c
31852@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31853 .cable_detect = jmicron_cable_detect,
31854 };
31855
31856-static const struct ide_port_info jmicron_chipset __devinitdata = {
31857+static const struct ide_port_info jmicron_chipset __devinitconst = {
31858 .name = DRV_NAME,
31859 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31860 .port_ops = &jmicron_port_ops,
31861diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31862index 95327a2..73f78d8 100644
31863--- a/drivers/ide/ns87415.c
31864+++ b/drivers/ide/ns87415.c
31865@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31866 .dma_sff_read_status = superio_dma_sff_read_status,
31867 };
31868
31869-static const struct ide_port_info ns87415_chipset __devinitdata = {
31870+static const struct ide_port_info ns87415_chipset __devinitconst = {
31871 .name = DRV_NAME,
31872 .init_hwif = init_hwif_ns87415,
31873 .tp_ops = &ns87415_tp_ops,
31874diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31875index 1a53a4c..39edc66 100644
31876--- a/drivers/ide/opti621.c
31877+++ b/drivers/ide/opti621.c
31878@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31879 .set_pio_mode = opti621_set_pio_mode,
31880 };
31881
31882-static const struct ide_port_info opti621_chipset __devinitdata = {
31883+static const struct ide_port_info opti621_chipset __devinitconst = {
31884 .name = DRV_NAME,
31885 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31886 .port_ops = &opti621_port_ops,
31887diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31888index 9546fe2..2e5ceb6 100644
31889--- a/drivers/ide/pdc202xx_new.c
31890+++ b/drivers/ide/pdc202xx_new.c
31891@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31892 .udma_mask = udma, \
31893 }
31894
31895-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31896+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31897 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31898 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31899 };
31900diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31901index 3a35ec6..5634510 100644
31902--- a/drivers/ide/pdc202xx_old.c
31903+++ b/drivers/ide/pdc202xx_old.c
31904@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31905 .max_sectors = sectors, \
31906 }
31907
31908-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31909+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31910 { /* 0: PDC20246 */
31911 .name = DRV_NAME,
31912 .init_chipset = init_chipset_pdc202xx,
31913diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31914index 1892e81..fe0fd60 100644
31915--- a/drivers/ide/piix.c
31916+++ b/drivers/ide/piix.c
31917@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31918 .udma_mask = udma, \
31919 }
31920
31921-static const struct ide_port_info piix_pci_info[] __devinitdata = {
31922+static const struct ide_port_info piix_pci_info[] __devinitconst = {
31923 /* 0: MPIIX */
31924 { /*
31925 * MPIIX actually has only a single IDE channel mapped to
31926diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31927index a6414a8..c04173e 100644
31928--- a/drivers/ide/rz1000.c
31929+++ b/drivers/ide/rz1000.c
31930@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31931 }
31932 }
31933
31934-static const struct ide_port_info rz1000_chipset __devinitdata = {
31935+static const struct ide_port_info rz1000_chipset __devinitconst = {
31936 .name = DRV_NAME,
31937 .host_flags = IDE_HFLAG_NO_DMA,
31938 };
31939diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31940index 356b9b5..d4758eb 100644
31941--- a/drivers/ide/sc1200.c
31942+++ b/drivers/ide/sc1200.c
31943@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31944 .dma_sff_read_status = ide_dma_sff_read_status,
31945 };
31946
31947-static const struct ide_port_info sc1200_chipset __devinitdata = {
31948+static const struct ide_port_info sc1200_chipset __devinitconst = {
31949 .name = DRV_NAME,
31950 .port_ops = &sc1200_port_ops,
31951 .dma_ops = &sc1200_dma_ops,
31952diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31953index b7f5b0c..9701038 100644
31954--- a/drivers/ide/scc_pata.c
31955+++ b/drivers/ide/scc_pata.c
31956@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31957 .dma_sff_read_status = scc_dma_sff_read_status,
31958 };
31959
31960-static const struct ide_port_info scc_chipset __devinitdata = {
31961+static const struct ide_port_info scc_chipset __devinitconst = {
31962 .name = "sccIDE",
31963 .init_iops = init_iops_scc,
31964 .init_dma = scc_init_dma,
31965diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31966index 35fb8da..24d72ef 100644
31967--- a/drivers/ide/serverworks.c
31968+++ b/drivers/ide/serverworks.c
31969@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31970 .cable_detect = svwks_cable_detect,
31971 };
31972
31973-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31974+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31975 { /* 0: OSB4 */
31976 .name = DRV_NAME,
31977 .init_chipset = init_chipset_svwks,
31978diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31979index ddeda44..46f7e30 100644
31980--- a/drivers/ide/siimage.c
31981+++ b/drivers/ide/siimage.c
31982@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31983 .udma_mask = ATA_UDMA6, \
31984 }
31985
31986-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31987+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31988 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31989 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31990 };
31991diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31992index 4a00225..09e61b4 100644
31993--- a/drivers/ide/sis5513.c
31994+++ b/drivers/ide/sis5513.c
31995@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31996 .cable_detect = sis_cable_detect,
31997 };
31998
31999-static const struct ide_port_info sis5513_chipset __devinitdata = {
32000+static const struct ide_port_info sis5513_chipset __devinitconst = {
32001 .name = DRV_NAME,
32002 .init_chipset = init_chipset_sis5513,
32003 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
32004diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
32005index f21dc2a..d051cd2 100644
32006--- a/drivers/ide/sl82c105.c
32007+++ b/drivers/ide/sl82c105.c
32008@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
32009 .dma_sff_read_status = ide_dma_sff_read_status,
32010 };
32011
32012-static const struct ide_port_info sl82c105_chipset __devinitdata = {
32013+static const struct ide_port_info sl82c105_chipset __devinitconst = {
32014 .name = DRV_NAME,
32015 .init_chipset = init_chipset_sl82c105,
32016 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
32017diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
32018index 864ffe0..863a5e9 100644
32019--- a/drivers/ide/slc90e66.c
32020+++ b/drivers/ide/slc90e66.c
32021@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
32022 .cable_detect = slc90e66_cable_detect,
32023 };
32024
32025-static const struct ide_port_info slc90e66_chipset __devinitdata = {
32026+static const struct ide_port_info slc90e66_chipset __devinitconst = {
32027 .name = DRV_NAME,
32028 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
32029 .port_ops = &slc90e66_port_ops,
32030diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
32031index 4799d5c..1794678 100644
32032--- a/drivers/ide/tc86c001.c
32033+++ b/drivers/ide/tc86c001.c
32034@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
32035 .dma_sff_read_status = ide_dma_sff_read_status,
32036 };
32037
32038-static const struct ide_port_info tc86c001_chipset __devinitdata = {
32039+static const struct ide_port_info tc86c001_chipset __devinitconst = {
32040 .name = DRV_NAME,
32041 .init_hwif = init_hwif_tc86c001,
32042 .port_ops = &tc86c001_port_ops,
32043diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
32044index 281c914..55ce1b8 100644
32045--- a/drivers/ide/triflex.c
32046+++ b/drivers/ide/triflex.c
32047@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
32048 .set_dma_mode = triflex_set_mode,
32049 };
32050
32051-static const struct ide_port_info triflex_device __devinitdata = {
32052+static const struct ide_port_info triflex_device __devinitconst = {
32053 .name = DRV_NAME,
32054 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
32055 .port_ops = &triflex_port_ops,
32056diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
32057index 4b42ca0..e494a98 100644
32058--- a/drivers/ide/trm290.c
32059+++ b/drivers/ide/trm290.c
32060@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
32061 .dma_check = trm290_dma_check,
32062 };
32063
32064-static const struct ide_port_info trm290_chipset __devinitdata = {
32065+static const struct ide_port_info trm290_chipset __devinitconst = {
32066 .name = DRV_NAME,
32067 .init_hwif = init_hwif_trm290,
32068 .tp_ops = &trm290_tp_ops,
32069diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
32070index f46f49c..eb77678 100644
32071--- a/drivers/ide/via82cxxx.c
32072+++ b/drivers/ide/via82cxxx.c
32073@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
32074 .cable_detect = via82cxxx_cable_detect,
32075 };
32076
32077-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
32078+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
32079 .name = DRV_NAME,
32080 .init_chipset = init_chipset_via82cxxx,
32081 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
32082diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
32083index 73d4531..c90cd2d 100644
32084--- a/drivers/ieee802154/fakehard.c
32085+++ b/drivers/ieee802154/fakehard.c
32086@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
32087 phy->transmit_power = 0xbf;
32088
32089 dev->netdev_ops = &fake_ops;
32090- dev->ml_priv = &fake_mlme;
32091+ dev->ml_priv = (void *)&fake_mlme;
32092
32093 priv = netdev_priv(dev);
32094 priv->phy = phy;
32095diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
32096index c889aae..6cf5aa7 100644
32097--- a/drivers/infiniband/core/cm.c
32098+++ b/drivers/infiniband/core/cm.c
32099@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
32100
32101 struct cm_counter_group {
32102 struct kobject obj;
32103- atomic_long_t counter[CM_ATTR_COUNT];
32104+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
32105 };
32106
32107 struct cm_counter_attribute {
32108@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
32109 struct ib_mad_send_buf *msg = NULL;
32110 int ret;
32111
32112- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32113+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32114 counter[CM_REQ_COUNTER]);
32115
32116 /* Quick state check to discard duplicate REQs. */
32117@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
32118 if (!cm_id_priv)
32119 return;
32120
32121- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32122+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32123 counter[CM_REP_COUNTER]);
32124 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
32125 if (ret)
32126@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
32127 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
32128 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
32129 spin_unlock_irq(&cm_id_priv->lock);
32130- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32131+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32132 counter[CM_RTU_COUNTER]);
32133 goto out;
32134 }
32135@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
32136 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
32137 dreq_msg->local_comm_id);
32138 if (!cm_id_priv) {
32139- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32140+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32141 counter[CM_DREQ_COUNTER]);
32142 cm_issue_drep(work->port, work->mad_recv_wc);
32143 return -EINVAL;
32144@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
32145 case IB_CM_MRA_REP_RCVD:
32146 break;
32147 case IB_CM_TIMEWAIT:
32148- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32149+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32150 counter[CM_DREQ_COUNTER]);
32151 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32152 goto unlock;
32153@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
32154 cm_free_msg(msg);
32155 goto deref;
32156 case IB_CM_DREQ_RCVD:
32157- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32158+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32159 counter[CM_DREQ_COUNTER]);
32160 goto unlock;
32161 default:
32162@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
32163 ib_modify_mad(cm_id_priv->av.port->mad_agent,
32164 cm_id_priv->msg, timeout)) {
32165 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
32166- atomic_long_inc(&work->port->
32167+ atomic_long_inc_unchecked(&work->port->
32168 counter_group[CM_RECV_DUPLICATES].
32169 counter[CM_MRA_COUNTER]);
32170 goto out;
32171@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
32172 break;
32173 case IB_CM_MRA_REQ_RCVD:
32174 case IB_CM_MRA_REP_RCVD:
32175- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32176+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32177 counter[CM_MRA_COUNTER]);
32178 /* fall through */
32179 default:
32180@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
32181 case IB_CM_LAP_IDLE:
32182 break;
32183 case IB_CM_MRA_LAP_SENT:
32184- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32185+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32186 counter[CM_LAP_COUNTER]);
32187 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32188 goto unlock;
32189@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
32190 cm_free_msg(msg);
32191 goto deref;
32192 case IB_CM_LAP_RCVD:
32193- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32194+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32195 counter[CM_LAP_COUNTER]);
32196 goto unlock;
32197 default:
32198@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
32199 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
32200 if (cur_cm_id_priv) {
32201 spin_unlock_irq(&cm.lock);
32202- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32203+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32204 counter[CM_SIDR_REQ_COUNTER]);
32205 goto out; /* Duplicate message. */
32206 }
32207@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
32208 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
32209 msg->retries = 1;
32210
32211- atomic_long_add(1 + msg->retries,
32212+ atomic_long_add_unchecked(1 + msg->retries,
32213 &port->counter_group[CM_XMIT].counter[attr_index]);
32214 if (msg->retries)
32215- atomic_long_add(msg->retries,
32216+ atomic_long_add_unchecked(msg->retries,
32217 &port->counter_group[CM_XMIT_RETRIES].
32218 counter[attr_index]);
32219
32220@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
32221 }
32222
32223 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
32224- atomic_long_inc(&port->counter_group[CM_RECV].
32225+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
32226 counter[attr_id - CM_ATTR_ID_OFFSET]);
32227
32228 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
32229@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
32230 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
32231
32232 return sprintf(buf, "%ld\n",
32233- atomic_long_read(&group->counter[cm_attr->index]));
32234+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32235 }
32236
32237 static const struct sysfs_ops cm_counter_ops = {
32238diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
32239index 176c8f9..2627b62 100644
32240--- a/drivers/infiniband/core/fmr_pool.c
32241+++ b/drivers/infiniband/core/fmr_pool.c
32242@@ -98,8 +98,8 @@ struct ib_fmr_pool {
32243
32244 struct task_struct *thread;
32245
32246- atomic_t req_ser;
32247- atomic_t flush_ser;
32248+ atomic_unchecked_t req_ser;
32249+ atomic_unchecked_t flush_ser;
32250
32251 wait_queue_head_t force_wait;
32252 };
32253@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32254 struct ib_fmr_pool *pool = pool_ptr;
32255
32256 do {
32257- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32258+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32259 ib_fmr_batch_release(pool);
32260
32261- atomic_inc(&pool->flush_ser);
32262+ atomic_inc_unchecked(&pool->flush_ser);
32263 wake_up_interruptible(&pool->force_wait);
32264
32265 if (pool->flush_function)
32266@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32267 }
32268
32269 set_current_state(TASK_INTERRUPTIBLE);
32270- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32271+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32272 !kthread_should_stop())
32273 schedule();
32274 __set_current_state(TASK_RUNNING);
32275@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32276 pool->dirty_watermark = params->dirty_watermark;
32277 pool->dirty_len = 0;
32278 spin_lock_init(&pool->pool_lock);
32279- atomic_set(&pool->req_ser, 0);
32280- atomic_set(&pool->flush_ser, 0);
32281+ atomic_set_unchecked(&pool->req_ser, 0);
32282+ atomic_set_unchecked(&pool->flush_ser, 0);
32283 init_waitqueue_head(&pool->force_wait);
32284
32285 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32286@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32287 }
32288 spin_unlock_irq(&pool->pool_lock);
32289
32290- serial = atomic_inc_return(&pool->req_ser);
32291+ serial = atomic_inc_return_unchecked(&pool->req_ser);
32292 wake_up_process(pool->thread);
32293
32294 if (wait_event_interruptible(pool->force_wait,
32295- atomic_read(&pool->flush_ser) - serial >= 0))
32296+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32297 return -EINTR;
32298
32299 return 0;
32300@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32301 } else {
32302 list_add_tail(&fmr->list, &pool->dirty_list);
32303 if (++pool->dirty_len >= pool->dirty_watermark) {
32304- atomic_inc(&pool->req_ser);
32305+ atomic_inc_unchecked(&pool->req_ser);
32306 wake_up_process(pool->thread);
32307 }
32308 }
32309diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32310index 57e07c6..56d09d4 100644
32311--- a/drivers/infiniband/hw/cxgb4/mem.c
32312+++ b/drivers/infiniband/hw/cxgb4/mem.c
32313@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32314 int err;
32315 struct fw_ri_tpte tpt;
32316 u32 stag_idx;
32317- static atomic_t key;
32318+ static atomic_unchecked_t key;
32319
32320 if (c4iw_fatal_error(rdev))
32321 return -EIO;
32322@@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32323 if (rdev->stats.stag.cur > rdev->stats.stag.max)
32324 rdev->stats.stag.max = rdev->stats.stag.cur;
32325 mutex_unlock(&rdev->stats.lock);
32326- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32327+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32328 }
32329 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32330 __func__, stag_state, type, pdid, stag_idx);
32331diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32332index 79b3dbc..96e5fcc 100644
32333--- a/drivers/infiniband/hw/ipath/ipath_rc.c
32334+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32335@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32336 struct ib_atomic_eth *ateth;
32337 struct ipath_ack_entry *e;
32338 u64 vaddr;
32339- atomic64_t *maddr;
32340+ atomic64_unchecked_t *maddr;
32341 u64 sdata;
32342 u32 rkey;
32343 u8 next;
32344@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32345 IB_ACCESS_REMOTE_ATOMIC)))
32346 goto nack_acc_unlck;
32347 /* Perform atomic OP and save result. */
32348- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32349+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32350 sdata = be64_to_cpu(ateth->swap_data);
32351 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32352 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32353- (u64) atomic64_add_return(sdata, maddr) - sdata :
32354+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32355 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32356 be64_to_cpu(ateth->compare_data),
32357 sdata);
32358diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32359index 1f95bba..9530f87 100644
32360--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32361+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32362@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32363 unsigned long flags;
32364 struct ib_wc wc;
32365 u64 sdata;
32366- atomic64_t *maddr;
32367+ atomic64_unchecked_t *maddr;
32368 enum ib_wc_status send_status;
32369
32370 /*
32371@@ -382,11 +382,11 @@ again:
32372 IB_ACCESS_REMOTE_ATOMIC)))
32373 goto acc_err;
32374 /* Perform atomic OP and save result. */
32375- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32376+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32377 sdata = wqe->wr.wr.atomic.compare_add;
32378 *(u64 *) sqp->s_sge.sge.vaddr =
32379 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32380- (u64) atomic64_add_return(sdata, maddr) - sdata :
32381+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32382 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32383 sdata, wqe->wr.wr.atomic.swap);
32384 goto send_comp;
32385diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32386index 7140199..da60063 100644
32387--- a/drivers/infiniband/hw/nes/nes.c
32388+++ b/drivers/infiniband/hw/nes/nes.c
32389@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32390 LIST_HEAD(nes_adapter_list);
32391 static LIST_HEAD(nes_dev_list);
32392
32393-atomic_t qps_destroyed;
32394+atomic_unchecked_t qps_destroyed;
32395
32396 static unsigned int ee_flsh_adapter;
32397 static unsigned int sysfs_nonidx_addr;
32398@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32399 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32400 struct nes_adapter *nesadapter = nesdev->nesadapter;
32401
32402- atomic_inc(&qps_destroyed);
32403+ atomic_inc_unchecked(&qps_destroyed);
32404
32405 /* Free the control structures */
32406
32407diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32408index c438e46..ca30356 100644
32409--- a/drivers/infiniband/hw/nes/nes.h
32410+++ b/drivers/infiniband/hw/nes/nes.h
32411@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32412 extern unsigned int wqm_quanta;
32413 extern struct list_head nes_adapter_list;
32414
32415-extern atomic_t cm_connects;
32416-extern atomic_t cm_accepts;
32417-extern atomic_t cm_disconnects;
32418-extern atomic_t cm_closes;
32419-extern atomic_t cm_connecteds;
32420-extern atomic_t cm_connect_reqs;
32421-extern atomic_t cm_rejects;
32422-extern atomic_t mod_qp_timouts;
32423-extern atomic_t qps_created;
32424-extern atomic_t qps_destroyed;
32425-extern atomic_t sw_qps_destroyed;
32426+extern atomic_unchecked_t cm_connects;
32427+extern atomic_unchecked_t cm_accepts;
32428+extern atomic_unchecked_t cm_disconnects;
32429+extern atomic_unchecked_t cm_closes;
32430+extern atomic_unchecked_t cm_connecteds;
32431+extern atomic_unchecked_t cm_connect_reqs;
32432+extern atomic_unchecked_t cm_rejects;
32433+extern atomic_unchecked_t mod_qp_timouts;
32434+extern atomic_unchecked_t qps_created;
32435+extern atomic_unchecked_t qps_destroyed;
32436+extern atomic_unchecked_t sw_qps_destroyed;
32437 extern u32 mh_detected;
32438 extern u32 mh_pauses_sent;
32439 extern u32 cm_packets_sent;
32440@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32441 extern u32 cm_packets_received;
32442 extern u32 cm_packets_dropped;
32443 extern u32 cm_packets_retrans;
32444-extern atomic_t cm_listens_created;
32445-extern atomic_t cm_listens_destroyed;
32446+extern atomic_unchecked_t cm_listens_created;
32447+extern atomic_unchecked_t cm_listens_destroyed;
32448 extern u32 cm_backlog_drops;
32449-extern atomic_t cm_loopbacks;
32450-extern atomic_t cm_nodes_created;
32451-extern atomic_t cm_nodes_destroyed;
32452-extern atomic_t cm_accel_dropped_pkts;
32453-extern atomic_t cm_resets_recvd;
32454-extern atomic_t pau_qps_created;
32455-extern atomic_t pau_qps_destroyed;
32456+extern atomic_unchecked_t cm_loopbacks;
32457+extern atomic_unchecked_t cm_nodes_created;
32458+extern atomic_unchecked_t cm_nodes_destroyed;
32459+extern atomic_unchecked_t cm_accel_dropped_pkts;
32460+extern atomic_unchecked_t cm_resets_recvd;
32461+extern atomic_unchecked_t pau_qps_created;
32462+extern atomic_unchecked_t pau_qps_destroyed;
32463
32464 extern u32 int_mod_timer_init;
32465 extern u32 int_mod_cq_depth_256;
32466diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32467index 020e95c..fbb3450 100644
32468--- a/drivers/infiniband/hw/nes/nes_cm.c
32469+++ b/drivers/infiniband/hw/nes/nes_cm.c
32470@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32471 u32 cm_packets_retrans;
32472 u32 cm_packets_created;
32473 u32 cm_packets_received;
32474-atomic_t cm_listens_created;
32475-atomic_t cm_listens_destroyed;
32476+atomic_unchecked_t cm_listens_created;
32477+atomic_unchecked_t cm_listens_destroyed;
32478 u32 cm_backlog_drops;
32479-atomic_t cm_loopbacks;
32480-atomic_t cm_nodes_created;
32481-atomic_t cm_nodes_destroyed;
32482-atomic_t cm_accel_dropped_pkts;
32483-atomic_t cm_resets_recvd;
32484+atomic_unchecked_t cm_loopbacks;
32485+atomic_unchecked_t cm_nodes_created;
32486+atomic_unchecked_t cm_nodes_destroyed;
32487+atomic_unchecked_t cm_accel_dropped_pkts;
32488+atomic_unchecked_t cm_resets_recvd;
32489
32490 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32491 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32492@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32493
32494 static struct nes_cm_core *g_cm_core;
32495
32496-atomic_t cm_connects;
32497-atomic_t cm_accepts;
32498-atomic_t cm_disconnects;
32499-atomic_t cm_closes;
32500-atomic_t cm_connecteds;
32501-atomic_t cm_connect_reqs;
32502-atomic_t cm_rejects;
32503+atomic_unchecked_t cm_connects;
32504+atomic_unchecked_t cm_accepts;
32505+atomic_unchecked_t cm_disconnects;
32506+atomic_unchecked_t cm_closes;
32507+atomic_unchecked_t cm_connecteds;
32508+atomic_unchecked_t cm_connect_reqs;
32509+atomic_unchecked_t cm_rejects;
32510
32511 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32512 {
32513@@ -1279,7 +1279,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32514 kfree(listener);
32515 listener = NULL;
32516 ret = 0;
32517- atomic_inc(&cm_listens_destroyed);
32518+ atomic_inc_unchecked(&cm_listens_destroyed);
32519 } else {
32520 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32521 }
32522@@ -1482,7 +1482,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32523 cm_node->rem_mac);
32524
32525 add_hte_node(cm_core, cm_node);
32526- atomic_inc(&cm_nodes_created);
32527+ atomic_inc_unchecked(&cm_nodes_created);
32528
32529 return cm_node;
32530 }
32531@@ -1540,7 +1540,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32532 }
32533
32534 atomic_dec(&cm_core->node_cnt);
32535- atomic_inc(&cm_nodes_destroyed);
32536+ atomic_inc_unchecked(&cm_nodes_destroyed);
32537 nesqp = cm_node->nesqp;
32538 if (nesqp) {
32539 nesqp->cm_node = NULL;
32540@@ -1604,7 +1604,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32541
32542 static void drop_packet(struct sk_buff *skb)
32543 {
32544- atomic_inc(&cm_accel_dropped_pkts);
32545+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
32546 dev_kfree_skb_any(skb);
32547 }
32548
32549@@ -1667,7 +1667,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32550 {
32551
32552 int reset = 0; /* whether to send reset in case of err.. */
32553- atomic_inc(&cm_resets_recvd);
32554+ atomic_inc_unchecked(&cm_resets_recvd);
32555 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32556 " refcnt=%d\n", cm_node, cm_node->state,
32557 atomic_read(&cm_node->ref_count));
32558@@ -2308,7 +2308,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32559 rem_ref_cm_node(cm_node->cm_core, cm_node);
32560 return NULL;
32561 }
32562- atomic_inc(&cm_loopbacks);
32563+ atomic_inc_unchecked(&cm_loopbacks);
32564 loopbackremotenode->loopbackpartner = cm_node;
32565 loopbackremotenode->tcp_cntxt.rcv_wscale =
32566 NES_CM_DEFAULT_RCV_WND_SCALE;
32567@@ -2583,7 +2583,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32568 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32569 else {
32570 rem_ref_cm_node(cm_core, cm_node);
32571- atomic_inc(&cm_accel_dropped_pkts);
32572+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
32573 dev_kfree_skb_any(skb);
32574 }
32575 break;
32576@@ -2891,7 +2891,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32577
32578 if ((cm_id) && (cm_id->event_handler)) {
32579 if (issue_disconn) {
32580- atomic_inc(&cm_disconnects);
32581+ atomic_inc_unchecked(&cm_disconnects);
32582 cm_event.event = IW_CM_EVENT_DISCONNECT;
32583 cm_event.status = disconn_status;
32584 cm_event.local_addr = cm_id->local_addr;
32585@@ -2913,7 +2913,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32586 }
32587
32588 if (issue_close) {
32589- atomic_inc(&cm_closes);
32590+ atomic_inc_unchecked(&cm_closes);
32591 nes_disconnect(nesqp, 1);
32592
32593 cm_id->provider_data = nesqp;
32594@@ -3049,7 +3049,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32595
32596 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32597 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32598- atomic_inc(&cm_accepts);
32599+ atomic_inc_unchecked(&cm_accepts);
32600
32601 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32602 netdev_refcnt_read(nesvnic->netdev));
32603@@ -3251,7 +3251,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32604 struct nes_cm_core *cm_core;
32605 u8 *start_buff;
32606
32607- atomic_inc(&cm_rejects);
32608+ atomic_inc_unchecked(&cm_rejects);
32609 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32610 loopback = cm_node->loopbackpartner;
32611 cm_core = cm_node->cm_core;
32612@@ -3311,7 +3311,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32613 ntohl(cm_id->local_addr.sin_addr.s_addr),
32614 ntohs(cm_id->local_addr.sin_port));
32615
32616- atomic_inc(&cm_connects);
32617+ atomic_inc_unchecked(&cm_connects);
32618 nesqp->active_conn = 1;
32619
32620 /* cache the cm_id in the qp */
32621@@ -3421,7 +3421,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32622 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32623 return err;
32624 }
32625- atomic_inc(&cm_listens_created);
32626+ atomic_inc_unchecked(&cm_listens_created);
32627 }
32628
32629 cm_id->add_ref(cm_id);
32630@@ -3522,7 +3522,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32631
32632 if (nesqp->destroyed)
32633 return;
32634- atomic_inc(&cm_connecteds);
32635+ atomic_inc_unchecked(&cm_connecteds);
32636 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32637 " local port 0x%04X. jiffies = %lu.\n",
32638 nesqp->hwqp.qp_id,
32639@@ -3709,7 +3709,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32640
32641 cm_id->add_ref(cm_id);
32642 ret = cm_id->event_handler(cm_id, &cm_event);
32643- atomic_inc(&cm_closes);
32644+ atomic_inc_unchecked(&cm_closes);
32645 cm_event.event = IW_CM_EVENT_CLOSE;
32646 cm_event.status = 0;
32647 cm_event.provider_data = cm_id->provider_data;
32648@@ -3745,7 +3745,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32649 return;
32650 cm_id = cm_node->cm_id;
32651
32652- atomic_inc(&cm_connect_reqs);
32653+ atomic_inc_unchecked(&cm_connect_reqs);
32654 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32655 cm_node, cm_id, jiffies);
32656
32657@@ -3785,7 +3785,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32658 return;
32659 cm_id = cm_node->cm_id;
32660
32661- atomic_inc(&cm_connect_reqs);
32662+ atomic_inc_unchecked(&cm_connect_reqs);
32663 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32664 cm_node, cm_id, jiffies);
32665
32666diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32667index 3ba7be3..c81f6ff 100644
32668--- a/drivers/infiniband/hw/nes/nes_mgt.c
32669+++ b/drivers/infiniband/hw/nes/nes_mgt.c
32670@@ -40,8 +40,8 @@
32671 #include "nes.h"
32672 #include "nes_mgt.h"
32673
32674-atomic_t pau_qps_created;
32675-atomic_t pau_qps_destroyed;
32676+atomic_unchecked_t pau_qps_created;
32677+atomic_unchecked_t pau_qps_destroyed;
32678
32679 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32680 {
32681@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32682 {
32683 struct sk_buff *skb;
32684 unsigned long flags;
32685- atomic_inc(&pau_qps_destroyed);
32686+ atomic_inc_unchecked(&pau_qps_destroyed);
32687
32688 /* Free packets that have not yet been forwarded */
32689 /* Lock is acquired by skb_dequeue when removing the skb */
32690@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32691 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32692 skb_queue_head_init(&nesqp->pau_list);
32693 spin_lock_init(&nesqp->pau_lock);
32694- atomic_inc(&pau_qps_created);
32695+ atomic_inc_unchecked(&pau_qps_created);
32696 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32697 }
32698
32699diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32700index f3a3ecf..57d311d 100644
32701--- a/drivers/infiniband/hw/nes/nes_nic.c
32702+++ b/drivers/infiniband/hw/nes/nes_nic.c
32703@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32704 target_stat_values[++index] = mh_detected;
32705 target_stat_values[++index] = mh_pauses_sent;
32706 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32707- target_stat_values[++index] = atomic_read(&cm_connects);
32708- target_stat_values[++index] = atomic_read(&cm_accepts);
32709- target_stat_values[++index] = atomic_read(&cm_disconnects);
32710- target_stat_values[++index] = atomic_read(&cm_connecteds);
32711- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32712- target_stat_values[++index] = atomic_read(&cm_rejects);
32713- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32714- target_stat_values[++index] = atomic_read(&qps_created);
32715- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32716- target_stat_values[++index] = atomic_read(&qps_destroyed);
32717- target_stat_values[++index] = atomic_read(&cm_closes);
32718+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32719+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32720+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32721+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32722+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32723+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32724+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32725+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32726+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32727+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32728+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32729 target_stat_values[++index] = cm_packets_sent;
32730 target_stat_values[++index] = cm_packets_bounced;
32731 target_stat_values[++index] = cm_packets_created;
32732 target_stat_values[++index] = cm_packets_received;
32733 target_stat_values[++index] = cm_packets_dropped;
32734 target_stat_values[++index] = cm_packets_retrans;
32735- target_stat_values[++index] = atomic_read(&cm_listens_created);
32736- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32737+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32738+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32739 target_stat_values[++index] = cm_backlog_drops;
32740- target_stat_values[++index] = atomic_read(&cm_loopbacks);
32741- target_stat_values[++index] = atomic_read(&cm_nodes_created);
32742- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32743- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32744- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32745+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32746+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32747+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32748+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32749+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32750 target_stat_values[++index] = nesadapter->free_4kpbl;
32751 target_stat_values[++index] = nesadapter->free_256pbl;
32752 target_stat_values[++index] = int_mod_timer_init;
32753 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32754 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32755 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32756- target_stat_values[++index] = atomic_read(&pau_qps_created);
32757- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32758+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32759+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32760 }
32761
32762 /**
32763diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32764index 8b8812d..a5e1133 100644
32765--- a/drivers/infiniband/hw/nes/nes_verbs.c
32766+++ b/drivers/infiniband/hw/nes/nes_verbs.c
32767@@ -46,9 +46,9 @@
32768
32769 #include <rdma/ib_umem.h>
32770
32771-atomic_t mod_qp_timouts;
32772-atomic_t qps_created;
32773-atomic_t sw_qps_destroyed;
32774+atomic_unchecked_t mod_qp_timouts;
32775+atomic_unchecked_t qps_created;
32776+atomic_unchecked_t sw_qps_destroyed;
32777
32778 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32779
32780@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32781 if (init_attr->create_flags)
32782 return ERR_PTR(-EINVAL);
32783
32784- atomic_inc(&qps_created);
32785+ atomic_inc_unchecked(&qps_created);
32786 switch (init_attr->qp_type) {
32787 case IB_QPT_RC:
32788 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32789@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32790 struct iw_cm_event cm_event;
32791 int ret = 0;
32792
32793- atomic_inc(&sw_qps_destroyed);
32794+ atomic_inc_unchecked(&sw_qps_destroyed);
32795 nesqp->destroyed = 1;
32796
32797 /* Blow away the connection if it exists. */
32798diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32799index 7e62f41..4c2b8e2 100644
32800--- a/drivers/infiniband/hw/qib/qib.h
32801+++ b/drivers/infiniband/hw/qib/qib.h
32802@@ -51,6 +51,7 @@
32803 #include <linux/completion.h>
32804 #include <linux/kref.h>
32805 #include <linux/sched.h>
32806+#include <linux/slab.h>
32807
32808 #include "qib_common.h"
32809 #include "qib_verbs.h"
32810diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32811index da739d9..da1c7f4 100644
32812--- a/drivers/input/gameport/gameport.c
32813+++ b/drivers/input/gameport/gameport.c
32814@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32815 */
32816 static void gameport_init_port(struct gameport *gameport)
32817 {
32818- static atomic_t gameport_no = ATOMIC_INIT(0);
32819+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32820
32821 __module_get(THIS_MODULE);
32822
32823 mutex_init(&gameport->drv_mutex);
32824 device_initialize(&gameport->dev);
32825 dev_set_name(&gameport->dev, "gameport%lu",
32826- (unsigned long)atomic_inc_return(&gameport_no) - 1);
32827+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32828 gameport->dev.bus = &gameport_bus;
32829 gameport->dev.release = gameport_release_port;
32830 if (gameport->parent)
32831diff --git a/drivers/input/input.c b/drivers/input/input.c
32832index 8921c61..f5cd63d 100644
32833--- a/drivers/input/input.c
32834+++ b/drivers/input/input.c
32835@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32836 */
32837 int input_register_device(struct input_dev *dev)
32838 {
32839- static atomic_t input_no = ATOMIC_INIT(0);
32840+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32841 struct input_handler *handler;
32842 const char *path;
32843 int error;
32844@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32845 dev->setkeycode = input_default_setkeycode;
32846
32847 dev_set_name(&dev->dev, "input%ld",
32848- (unsigned long) atomic_inc_return(&input_no) - 1);
32849+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32850
32851 error = device_add(&dev->dev);
32852 if (error)
32853diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32854index 04c69af..5f92d00 100644
32855--- a/drivers/input/joystick/sidewinder.c
32856+++ b/drivers/input/joystick/sidewinder.c
32857@@ -30,6 +30,7 @@
32858 #include <linux/kernel.h>
32859 #include <linux/module.h>
32860 #include <linux/slab.h>
32861+#include <linux/sched.h>
32862 #include <linux/init.h>
32863 #include <linux/input.h>
32864 #include <linux/gameport.h>
32865diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32866index 83811e4..0822b90 100644
32867--- a/drivers/input/joystick/xpad.c
32868+++ b/drivers/input/joystick/xpad.c
32869@@ -726,7 +726,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32870
32871 static int xpad_led_probe(struct usb_xpad *xpad)
32872 {
32873- static atomic_t led_seq = ATOMIC_INIT(0);
32874+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32875 long led_no;
32876 struct xpad_led *led;
32877 struct led_classdev *led_cdev;
32878@@ -739,7 +739,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32879 if (!led)
32880 return -ENOMEM;
32881
32882- led_no = (long)atomic_inc_return(&led_seq) - 1;
32883+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32884
32885 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32886 led->xpad = xpad;
32887diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32888index 0110b5a..d3ad144 100644
32889--- a/drivers/input/mousedev.c
32890+++ b/drivers/input/mousedev.c
32891@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32892
32893 spin_unlock_irq(&client->packet_lock);
32894
32895- if (copy_to_user(buffer, data, count))
32896+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
32897 return -EFAULT;
32898
32899 return count;
32900diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32901index d0f7533..fb8215b 100644
32902--- a/drivers/input/serio/serio.c
32903+++ b/drivers/input/serio/serio.c
32904@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
32905 */
32906 static void serio_init_port(struct serio *serio)
32907 {
32908- static atomic_t serio_no = ATOMIC_INIT(0);
32909+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32910
32911 __module_get(THIS_MODULE);
32912
32913@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
32914 mutex_init(&serio->drv_mutex);
32915 device_initialize(&serio->dev);
32916 dev_set_name(&serio->dev, "serio%ld",
32917- (long)atomic_inc_return(&serio_no) - 1);
32918+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
32919 serio->dev.bus = &serio_bus;
32920 serio->dev.release = serio_release_port;
32921 serio->dev.groups = serio_device_attr_groups;
32922diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32923index 38c4bd8..58965d9 100644
32924--- a/drivers/isdn/capi/capi.c
32925+++ b/drivers/isdn/capi/capi.c
32926@@ -83,8 +83,8 @@ struct capiminor {
32927
32928 struct capi20_appl *ap;
32929 u32 ncci;
32930- atomic_t datahandle;
32931- atomic_t msgid;
32932+ atomic_unchecked_t datahandle;
32933+ atomic_unchecked_t msgid;
32934
32935 struct tty_port port;
32936 int ttyinstop;
32937@@ -392,7 +392,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32938 capimsg_setu16(s, 2, mp->ap->applid);
32939 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32940 capimsg_setu8 (s, 5, CAPI_RESP);
32941- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32942+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32943 capimsg_setu32(s, 8, mp->ncci);
32944 capimsg_setu16(s, 12, datahandle);
32945 }
32946@@ -513,14 +513,14 @@ static void handle_minor_send(struct capiminor *mp)
32947 mp->outbytes -= len;
32948 spin_unlock_bh(&mp->outlock);
32949
32950- datahandle = atomic_inc_return(&mp->datahandle);
32951+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32952 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32953 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32954 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32955 capimsg_setu16(skb->data, 2, mp->ap->applid);
32956 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32957 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32958- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32959+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32960 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32961 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32962 capimsg_setu16(skb->data, 16, len); /* Data length */
32963diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
32964index a6d9fd2..afdb8a3 100644
32965--- a/drivers/isdn/gigaset/interface.c
32966+++ b/drivers/isdn/gigaset/interface.c
32967@@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
32968 }
32969 tty->driver_data = cs;
32970
32971- ++cs->port.count;
32972+ atomic_inc(&cs->port.count);
32973
32974- if (cs->port.count == 1) {
32975+ if (atomic_read(&cs->port.count) == 1) {
32976 tty_port_tty_set(&cs->port, tty);
32977 tty->low_latency = 1;
32978 }
32979@@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
32980
32981 if (!cs->connected)
32982 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32983- else if (!cs->port.count)
32984+ else if (!atomic_read(&cs->port.count))
32985 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32986- else if (!--cs->port.count)
32987+ else if (!atomic_dec_return(&cs->port.count))
32988 tty_port_tty_set(&cs->port, NULL);
32989
32990 mutex_unlock(&cs->mutex);
32991diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32992index 821f7ac..28d4030 100644
32993--- a/drivers/isdn/hardware/avm/b1.c
32994+++ b/drivers/isdn/hardware/avm/b1.c
32995@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
32996 }
32997 if (left) {
32998 if (t4file->user) {
32999- if (copy_from_user(buf, dp, left))
33000+ if (left > sizeof buf || copy_from_user(buf, dp, left))
33001 return -EFAULT;
33002 } else {
33003 memcpy(buf, dp, left);
33004@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
33005 }
33006 if (left) {
33007 if (config->user) {
33008- if (copy_from_user(buf, dp, left))
33009+ if (left > sizeof buf || copy_from_user(buf, dp, left))
33010 return -EFAULT;
33011 } else {
33012 memcpy(buf, dp, left);
33013diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
33014index dd6b53a..19d9ee6 100644
33015--- a/drivers/isdn/hardware/eicon/divasync.h
33016+++ b/drivers/isdn/hardware/eicon/divasync.h
33017@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
33018 } diva_didd_add_adapter_t;
33019 typedef struct _diva_didd_remove_adapter {
33020 IDI_CALL p_request;
33021-} diva_didd_remove_adapter_t;
33022+} __no_const diva_didd_remove_adapter_t;
33023 typedef struct _diva_didd_read_adapter_array {
33024 void *buffer;
33025 dword length;
33026diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
33027index d303e65..28bcb7b 100644
33028--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
33029+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
33030@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
33031 typedef struct _diva_os_idi_adapter_interface {
33032 diva_init_card_proc_t cleanup_adapter_proc;
33033 diva_cmd_card_proc_t cmd_proc;
33034-} diva_os_idi_adapter_interface_t;
33035+} __no_const diva_os_idi_adapter_interface_t;
33036
33037 typedef struct _diva_os_xdi_adapter {
33038 struct list_head link;
33039diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
33040index 7bc5067..fd36232 100644
33041--- a/drivers/isdn/i4l/isdn_tty.c
33042+++ b/drivers/isdn/i4l/isdn_tty.c
33043@@ -1505,9 +1505,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
33044 port = &info->port;
33045 #ifdef ISDN_DEBUG_MODEM_OPEN
33046 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
33047- port->count);
33048+ atomic_read(&port->count))
33049 #endif
33050- port->count++;
33051+ atomic_inc(&port->count);
33052 tty->driver_data = info;
33053 port->tty = tty;
33054 tty->port = port;
33055@@ -1553,7 +1553,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
33056 #endif
33057 return;
33058 }
33059- if ((tty->count == 1) && (port->count != 1)) {
33060+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
33061 /*
33062 * Uh, oh. tty->count is 1, which means that the tty
33063 * structure will be freed. Info->count should always
33064@@ -1562,15 +1562,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
33065 * serial port won't be shutdown.
33066 */
33067 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
33068- "info->count is %d\n", port->count);
33069- port->count = 1;
33070+ "info->count is %d\n", atomic_read(&port->count));
33071+ atomic_set(&port->count, 1);
33072 }
33073- if (--port->count < 0) {
33074+ if (atomic_dec_return(&port->count) < 0) {
33075 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
33076- info->line, port->count);
33077- port->count = 0;
33078+ info->line, atomic_read(&port->count));
33079+ atomic_set(&port->count, 0);
33080 }
33081- if (port->count) {
33082+ if (atomic_read(&port->count)) {
33083 #ifdef ISDN_DEBUG_MODEM_OPEN
33084 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
33085 #endif
33086@@ -1624,7 +1624,7 @@ isdn_tty_hangup(struct tty_struct *tty)
33087 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
33088 return;
33089 isdn_tty_shutdown(info);
33090- port->count = 0;
33091+ atomic_set(&port->count, 0);
33092 port->flags &= ~ASYNC_NORMAL_ACTIVE;
33093 port->tty = NULL;
33094 wake_up_interruptible(&port->open_wait);
33095@@ -1964,7 +1964,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
33096 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
33097 modem_info *info = &dev->mdm.info[i];
33098
33099- if (info->port.count == 0)
33100+ if (atomic_read(&info->port.count) == 0)
33101 continue;
33102 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
33103 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
33104diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
33105index e74df7c..03a03ba 100644
33106--- a/drivers/isdn/icn/icn.c
33107+++ b/drivers/isdn/icn/icn.c
33108@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
33109 if (count > len)
33110 count = len;
33111 if (user) {
33112- if (copy_from_user(msg, buf, count))
33113+ if (count > sizeof msg || copy_from_user(msg, buf, count))
33114 return -EFAULT;
33115 } else
33116 memcpy(msg, buf, count);
33117diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
33118index b5fdcb7..5b6c59f 100644
33119--- a/drivers/lguest/core.c
33120+++ b/drivers/lguest/core.c
33121@@ -92,9 +92,17 @@ static __init int map_switcher(void)
33122 * it's worked so far. The end address needs +1 because __get_vm_area
33123 * allocates an extra guard page, so we need space for that.
33124 */
33125+
33126+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
33127+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
33128+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
33129+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
33130+#else
33131 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
33132 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
33133 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
33134+#endif
33135+
33136 if (!switcher_vma) {
33137 err = -ENOMEM;
33138 printk("lguest: could not map switcher pages high\n");
33139@@ -119,7 +127,7 @@ static __init int map_switcher(void)
33140 * Now the Switcher is mapped at the right address, we can't fail!
33141 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
33142 */
33143- memcpy(switcher_vma->addr, start_switcher_text,
33144+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
33145 end_switcher_text - start_switcher_text);
33146
33147 printk(KERN_INFO "lguest: mapped switcher at %p\n",
33148diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
33149index 39809035..ce25c5e 100644
33150--- a/drivers/lguest/x86/core.c
33151+++ b/drivers/lguest/x86/core.c
33152@@ -59,7 +59,7 @@ static struct {
33153 /* Offset from where switcher.S was compiled to where we've copied it */
33154 static unsigned long switcher_offset(void)
33155 {
33156- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
33157+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
33158 }
33159
33160 /* This cpu's struct lguest_pages. */
33161@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
33162 * These copies are pretty cheap, so we do them unconditionally: */
33163 /* Save the current Host top-level page directory.
33164 */
33165+
33166+#ifdef CONFIG_PAX_PER_CPU_PGD
33167+ pages->state.host_cr3 = read_cr3();
33168+#else
33169 pages->state.host_cr3 = __pa(current->mm->pgd);
33170+#endif
33171+
33172 /*
33173 * Set up the Guest's page tables to see this CPU's pages (and no
33174 * other CPU's pages).
33175@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
33176 * compiled-in switcher code and the high-mapped copy we just made.
33177 */
33178 for (i = 0; i < IDT_ENTRIES; i++)
33179- default_idt_entries[i] += switcher_offset();
33180+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
33181
33182 /*
33183 * Set up the Switcher's per-cpu areas.
33184@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
33185 * it will be undisturbed when we switch. To change %cs and jump we
33186 * need this structure to feed to Intel's "lcall" instruction.
33187 */
33188- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
33189+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
33190 lguest_entry.segment = LGUEST_CS;
33191
33192 /*
33193diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
33194index 40634b0..4f5855e 100644
33195--- a/drivers/lguest/x86/switcher_32.S
33196+++ b/drivers/lguest/x86/switcher_32.S
33197@@ -87,6 +87,7 @@
33198 #include <asm/page.h>
33199 #include <asm/segment.h>
33200 #include <asm/lguest.h>
33201+#include <asm/processor-flags.h>
33202
33203 // We mark the start of the code to copy
33204 // It's placed in .text tho it's never run here
33205@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
33206 // Changes type when we load it: damn Intel!
33207 // For after we switch over our page tables
33208 // That entry will be read-only: we'd crash.
33209+
33210+#ifdef CONFIG_PAX_KERNEXEC
33211+ mov %cr0, %edx
33212+ xor $X86_CR0_WP, %edx
33213+ mov %edx, %cr0
33214+#endif
33215+
33216 movl $(GDT_ENTRY_TSS*8), %edx
33217 ltr %dx
33218
33219@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
33220 // Let's clear it again for our return.
33221 // The GDT descriptor of the Host
33222 // Points to the table after two "size" bytes
33223- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
33224+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
33225 // Clear "used" from type field (byte 5, bit 2)
33226- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
33227+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
33228+
33229+#ifdef CONFIG_PAX_KERNEXEC
33230+ mov %cr0, %eax
33231+ xor $X86_CR0_WP, %eax
33232+ mov %eax, %cr0
33233+#endif
33234
33235 // Once our page table's switched, the Guest is live!
33236 // The Host fades as we run this final step.
33237@@ -295,13 +309,12 @@ deliver_to_host:
33238 // I consulted gcc, and it gave
33239 // These instructions, which I gladly credit:
33240 leal (%edx,%ebx,8), %eax
33241- movzwl (%eax),%edx
33242- movl 4(%eax), %eax
33243- xorw %ax, %ax
33244- orl %eax, %edx
33245+ movl 4(%eax), %edx
33246+ movw (%eax), %dx
33247 // Now the address of the handler's in %edx
33248 // We call it now: its "iret" drops us home.
33249- jmp *%edx
33250+ ljmp $__KERNEL_CS, $1f
33251+1: jmp *%edx
33252
33253 // Every interrupt can come to us here
33254 // But we must truly tell each apart.
33255diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
33256index 20e5c2c..9e849a9 100644
33257--- a/drivers/macintosh/macio_asic.c
33258+++ b/drivers/macintosh/macio_asic.c
33259@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
33260 * MacIO is matched against any Apple ID, it's probe() function
33261 * will then decide wether it applies or not
33262 */
33263-static const struct pci_device_id __devinitdata pci_ids [] = { {
33264+static const struct pci_device_id __devinitconst pci_ids [] = { {
33265 .vendor = PCI_VENDOR_ID_APPLE,
33266 .device = PCI_ANY_ID,
33267 .subvendor = PCI_ANY_ID,
33268diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
33269index 15dbe03..743fc65 100644
33270--- a/drivers/md/bitmap.c
33271+++ b/drivers/md/bitmap.c
33272@@ -1786,7 +1786,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
33273 chunk_kb ? "KB" : "B");
33274 if (bitmap->storage.file) {
33275 seq_printf(seq, ", file: ");
33276- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
33277+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
33278 }
33279
33280 seq_printf(seq, "\n");
33281diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
33282index a1a3e6d..1918bfc 100644
33283--- a/drivers/md/dm-ioctl.c
33284+++ b/drivers/md/dm-ioctl.c
33285@@ -1590,7 +1590,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
33286 cmd == DM_LIST_VERSIONS_CMD)
33287 return 0;
33288
33289- if ((cmd == DM_DEV_CREATE_CMD)) {
33290+ if (cmd == DM_DEV_CREATE_CMD) {
33291 if (!*param->name) {
33292 DMWARN("name not supplied when creating device");
33293 return -EINVAL;
33294diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
33295index b58b7a3..8018b19 100644
33296--- a/drivers/md/dm-raid1.c
33297+++ b/drivers/md/dm-raid1.c
33298@@ -40,7 +40,7 @@ enum dm_raid1_error {
33299
33300 struct mirror {
33301 struct mirror_set *ms;
33302- atomic_t error_count;
33303+ atomic_unchecked_t error_count;
33304 unsigned long error_type;
33305 struct dm_dev *dev;
33306 sector_t offset;
33307@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
33308 struct mirror *m;
33309
33310 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33311- if (!atomic_read(&m->error_count))
33312+ if (!atomic_read_unchecked(&m->error_count))
33313 return m;
33314
33315 return NULL;
33316@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33317 * simple way to tell if a device has encountered
33318 * errors.
33319 */
33320- atomic_inc(&m->error_count);
33321+ atomic_inc_unchecked(&m->error_count);
33322
33323 if (test_and_set_bit(error_type, &m->error_type))
33324 return;
33325@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33326 struct mirror *m = get_default_mirror(ms);
33327
33328 do {
33329- if (likely(!atomic_read(&m->error_count)))
33330+ if (likely(!atomic_read_unchecked(&m->error_count)))
33331 return m;
33332
33333 if (m-- == ms->mirror)
33334@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33335 {
33336 struct mirror *default_mirror = get_default_mirror(m->ms);
33337
33338- return !atomic_read(&default_mirror->error_count);
33339+ return !atomic_read_unchecked(&default_mirror->error_count);
33340 }
33341
33342 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33343@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33344 */
33345 if (likely(region_in_sync(ms, region, 1)))
33346 m = choose_mirror(ms, bio->bi_sector);
33347- else if (m && atomic_read(&m->error_count))
33348+ else if (m && atomic_read_unchecked(&m->error_count))
33349 m = NULL;
33350
33351 if (likely(m))
33352@@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33353 }
33354
33355 ms->mirror[mirror].ms = ms;
33356- atomic_set(&(ms->mirror[mirror].error_count), 0);
33357+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33358 ms->mirror[mirror].error_type = 0;
33359 ms->mirror[mirror].offset = offset;
33360
33361@@ -1352,7 +1352,7 @@ static void mirror_resume(struct dm_target *ti)
33362 */
33363 static char device_status_char(struct mirror *m)
33364 {
33365- if (!atomic_read(&(m->error_count)))
33366+ if (!atomic_read_unchecked(&(m->error_count)))
33367 return 'A';
33368
33369 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33370diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33371index 35c94ff..20d4c17 100644
33372--- a/drivers/md/dm-stripe.c
33373+++ b/drivers/md/dm-stripe.c
33374@@ -20,7 +20,7 @@ struct stripe {
33375 struct dm_dev *dev;
33376 sector_t physical_start;
33377
33378- atomic_t error_count;
33379+ atomic_unchecked_t error_count;
33380 };
33381
33382 struct stripe_c {
33383@@ -193,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33384 kfree(sc);
33385 return r;
33386 }
33387- atomic_set(&(sc->stripe[i].error_count), 0);
33388+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33389 }
33390
33391 ti->private = sc;
33392@@ -315,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33393 DMEMIT("%d ", sc->stripes);
33394 for (i = 0; i < sc->stripes; i++) {
33395 DMEMIT("%s ", sc->stripe[i].dev->name);
33396- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33397+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33398 'D' : 'A';
33399 }
33400 buffer[i] = '\0';
33401@@ -362,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33402 */
33403 for (i = 0; i < sc->stripes; i++)
33404 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33405- atomic_inc(&(sc->stripe[i].error_count));
33406- if (atomic_read(&(sc->stripe[i].error_count)) <
33407+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
33408+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33409 DM_IO_ERROR_THRESHOLD)
33410 schedule_work(&sc->trigger_event);
33411 }
33412diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33413index 2e227fb..44ead1f 100644
33414--- a/drivers/md/dm-table.c
33415+++ b/drivers/md/dm-table.c
33416@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33417 if (!dev_size)
33418 return 0;
33419
33420- if ((start >= dev_size) || (start + len > dev_size)) {
33421+ if ((start >= dev_size) || (len > dev_size - start)) {
33422 DMWARN("%s: %s too small for target: "
33423 "start=%llu, len=%llu, dev_size=%llu",
33424 dm_device_name(ti->table->md), bdevname(bdev, b),
33425diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33426index 3e2907f..c28851a 100644
33427--- a/drivers/md/dm-thin-metadata.c
33428+++ b/drivers/md/dm-thin-metadata.c
33429@@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33430
33431 pmd->info.tm = tm;
33432 pmd->info.levels = 2;
33433- pmd->info.value_type.context = pmd->data_sm;
33434+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33435 pmd->info.value_type.size = sizeof(__le64);
33436 pmd->info.value_type.inc = data_block_inc;
33437 pmd->info.value_type.dec = data_block_dec;
33438@@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33439
33440 pmd->bl_info.tm = tm;
33441 pmd->bl_info.levels = 1;
33442- pmd->bl_info.value_type.context = pmd->data_sm;
33443+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33444 pmd->bl_info.value_type.size = sizeof(__le64);
33445 pmd->bl_info.value_type.inc = data_block_inc;
33446 pmd->bl_info.value_type.dec = data_block_dec;
33447diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33448index e24143c..ce2f21a1 100644
33449--- a/drivers/md/dm.c
33450+++ b/drivers/md/dm.c
33451@@ -176,9 +176,9 @@ struct mapped_device {
33452 /*
33453 * Event handling.
33454 */
33455- atomic_t event_nr;
33456+ atomic_unchecked_t event_nr;
33457 wait_queue_head_t eventq;
33458- atomic_t uevent_seq;
33459+ atomic_unchecked_t uevent_seq;
33460 struct list_head uevent_list;
33461 spinlock_t uevent_lock; /* Protect access to uevent_list */
33462
33463@@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
33464 rwlock_init(&md->map_lock);
33465 atomic_set(&md->holders, 1);
33466 atomic_set(&md->open_count, 0);
33467- atomic_set(&md->event_nr, 0);
33468- atomic_set(&md->uevent_seq, 0);
33469+ atomic_set_unchecked(&md->event_nr, 0);
33470+ atomic_set_unchecked(&md->uevent_seq, 0);
33471 INIT_LIST_HEAD(&md->uevent_list);
33472 spin_lock_init(&md->uevent_lock);
33473
33474@@ -1980,7 +1980,7 @@ static void event_callback(void *context)
33475
33476 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33477
33478- atomic_inc(&md->event_nr);
33479+ atomic_inc_unchecked(&md->event_nr);
33480 wake_up(&md->eventq);
33481 }
33482
33483@@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33484
33485 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33486 {
33487- return atomic_add_return(1, &md->uevent_seq);
33488+ return atomic_add_return_unchecked(1, &md->uevent_seq);
33489 }
33490
33491 uint32_t dm_get_event_nr(struct mapped_device *md)
33492 {
33493- return atomic_read(&md->event_nr);
33494+ return atomic_read_unchecked(&md->event_nr);
33495 }
33496
33497 int dm_wait_event(struct mapped_device *md, int event_nr)
33498 {
33499 return wait_event_interruptible(md->eventq,
33500- (event_nr != atomic_read(&md->event_nr)));
33501+ (event_nr != atomic_read_unchecked(&md->event_nr)));
33502 }
33503
33504 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33505diff --git a/drivers/md/md.c b/drivers/md/md.c
33506index d5ab449..7e9ed7b 100644
33507--- a/drivers/md/md.c
33508+++ b/drivers/md/md.c
33509@@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33510 * start build, activate spare
33511 */
33512 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33513-static atomic_t md_event_count;
33514+static atomic_unchecked_t md_event_count;
33515 void md_new_event(struct mddev *mddev)
33516 {
33517- atomic_inc(&md_event_count);
33518+ atomic_inc_unchecked(&md_event_count);
33519 wake_up(&md_event_waiters);
33520 }
33521 EXPORT_SYMBOL_GPL(md_new_event);
33522@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33523 */
33524 static void md_new_event_inintr(struct mddev *mddev)
33525 {
33526- atomic_inc(&md_event_count);
33527+ atomic_inc_unchecked(&md_event_count);
33528 wake_up(&md_event_waiters);
33529 }
33530
33531@@ -1565,7 +1565,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33532 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
33533 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
33534 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
33535- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33536+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33537
33538 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33539 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33540@@ -1809,7 +1809,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33541 else
33542 sb->resync_offset = cpu_to_le64(0);
33543
33544- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33545+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33546
33547 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33548 sb->size = cpu_to_le64(mddev->dev_sectors);
33549@@ -2803,7 +2803,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33550 static ssize_t
33551 errors_show(struct md_rdev *rdev, char *page)
33552 {
33553- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33554+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33555 }
33556
33557 static ssize_t
33558@@ -2812,7 +2812,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33559 char *e;
33560 unsigned long n = simple_strtoul(buf, &e, 10);
33561 if (*buf && (*e == 0 || *e == '\n')) {
33562- atomic_set(&rdev->corrected_errors, n);
33563+ atomic_set_unchecked(&rdev->corrected_errors, n);
33564 return len;
33565 }
33566 return -EINVAL;
33567@@ -3259,8 +3259,8 @@ int md_rdev_init(struct md_rdev *rdev)
33568 rdev->sb_loaded = 0;
33569 rdev->bb_page = NULL;
33570 atomic_set(&rdev->nr_pending, 0);
33571- atomic_set(&rdev->read_errors, 0);
33572- atomic_set(&rdev->corrected_errors, 0);
33573+ atomic_set_unchecked(&rdev->read_errors, 0);
33574+ atomic_set_unchecked(&rdev->corrected_errors, 0);
33575
33576 INIT_LIST_HEAD(&rdev->same_set);
33577 init_waitqueue_head(&rdev->blocked_wait);
33578@@ -6997,7 +6997,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33579
33580 spin_unlock(&pers_lock);
33581 seq_printf(seq, "\n");
33582- seq->poll_event = atomic_read(&md_event_count);
33583+ seq->poll_event = atomic_read_unchecked(&md_event_count);
33584 return 0;
33585 }
33586 if (v == (void*)2) {
33587@@ -7100,7 +7100,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33588 return error;
33589
33590 seq = file->private_data;
33591- seq->poll_event = atomic_read(&md_event_count);
33592+ seq->poll_event = atomic_read_unchecked(&md_event_count);
33593 return error;
33594 }
33595
33596@@ -7114,7 +7114,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33597 /* always allow read */
33598 mask = POLLIN | POLLRDNORM;
33599
33600- if (seq->poll_event != atomic_read(&md_event_count))
33601+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33602 mask |= POLLERR | POLLPRI;
33603 return mask;
33604 }
33605@@ -7158,7 +7158,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33606 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33607 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33608 (int)part_stat_read(&disk->part0, sectors[1]) -
33609- atomic_read(&disk->sync_io);
33610+ atomic_read_unchecked(&disk->sync_io);
33611 /* sync IO will cause sync_io to increase before the disk_stats
33612 * as sync_io is counted when a request starts, and
33613 * disk_stats is counted when it completes.
33614diff --git a/drivers/md/md.h b/drivers/md/md.h
33615index 7b4a3c3..994ad4f 100644
33616--- a/drivers/md/md.h
33617+++ b/drivers/md/md.h
33618@@ -94,13 +94,13 @@ struct md_rdev {
33619 * only maintained for arrays that
33620 * support hot removal
33621 */
33622- atomic_t read_errors; /* number of consecutive read errors that
33623+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
33624 * we have tried to ignore.
33625 */
33626 struct timespec last_read_error; /* monotonic time since our
33627 * last read error
33628 */
33629- atomic_t corrected_errors; /* number of corrected read errors,
33630+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33631 * for reporting to userspace and storing
33632 * in superblock.
33633 */
33634@@ -435,7 +435,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33635
33636 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33637 {
33638- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33639+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33640 }
33641
33642 struct md_personality
33643diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33644index fc90c11..c8cd9a9 100644
33645--- a/drivers/md/persistent-data/dm-space-map-checker.c
33646+++ b/drivers/md/persistent-data/dm-space-map-checker.c
33647@@ -167,7 +167,7 @@ static int ca_commit(struct count_array *old, struct count_array *new)
33648 /*----------------------------------------------------------------*/
33649
33650 struct sm_checker {
33651- struct dm_space_map sm;
33652+ dm_space_map_no_const sm;
33653
33654 struct count_array old_counts;
33655 struct count_array counts;
33656diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33657index 3d0ed53..35dc592 100644
33658--- a/drivers/md/persistent-data/dm-space-map-disk.c
33659+++ b/drivers/md/persistent-data/dm-space-map-disk.c
33660@@ -23,7 +23,7 @@
33661 * Space map interface.
33662 */
33663 struct sm_disk {
33664- struct dm_space_map sm;
33665+ dm_space_map_no_const sm;
33666
33667 struct ll_disk ll;
33668 struct ll_disk old_ll;
33669diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33670index e89ae5e..062e4c2 100644
33671--- a/drivers/md/persistent-data/dm-space-map-metadata.c
33672+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33673@@ -43,7 +43,7 @@ struct block_op {
33674 };
33675
33676 struct sm_metadata {
33677- struct dm_space_map sm;
33678+ dm_space_map_no_const sm;
33679
33680 struct ll_disk ll;
33681 struct ll_disk old_ll;
33682diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33683index 1cbfc6b..56e1dbb 100644
33684--- a/drivers/md/persistent-data/dm-space-map.h
33685+++ b/drivers/md/persistent-data/dm-space-map.h
33686@@ -60,6 +60,7 @@ struct dm_space_map {
33687 int (*root_size)(struct dm_space_map *sm, size_t *result);
33688 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33689 };
33690+typedef struct dm_space_map __no_const dm_space_map_no_const;
33691
33692 /*----------------------------------------------------------------*/
33693
33694diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33695index 53aec45..250851c 100644
33696--- a/drivers/md/raid1.c
33697+++ b/drivers/md/raid1.c
33698@@ -1685,7 +1685,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33699 if (r1_sync_page_io(rdev, sect, s,
33700 bio->bi_io_vec[idx].bv_page,
33701 READ) != 0)
33702- atomic_add(s, &rdev->corrected_errors);
33703+ atomic_add_unchecked(s, &rdev->corrected_errors);
33704 }
33705 sectors -= s;
33706 sect += s;
33707@@ -1907,7 +1907,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33708 test_bit(In_sync, &rdev->flags)) {
33709 if (r1_sync_page_io(rdev, sect, s,
33710 conf->tmppage, READ)) {
33711- atomic_add(s, &rdev->corrected_errors);
33712+ atomic_add_unchecked(s, &rdev->corrected_errors);
33713 printk(KERN_INFO
33714 "md/raid1:%s: read error corrected "
33715 "(%d sectors at %llu on %s)\n",
33716diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33717index 8da6282..8ec7103 100644
33718--- a/drivers/md/raid10.c
33719+++ b/drivers/md/raid10.c
33720@@ -1784,7 +1784,7 @@ static void end_sync_read(struct bio *bio, int error)
33721 /* The write handler will notice the lack of
33722 * R10BIO_Uptodate and record any errors etc
33723 */
33724- atomic_add(r10_bio->sectors,
33725+ atomic_add_unchecked(r10_bio->sectors,
33726 &conf->mirrors[d].rdev->corrected_errors);
33727
33728 /* for reconstruct, we always reschedule after a read.
33729@@ -2133,7 +2133,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33730 {
33731 struct timespec cur_time_mon;
33732 unsigned long hours_since_last;
33733- unsigned int read_errors = atomic_read(&rdev->read_errors);
33734+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33735
33736 ktime_get_ts(&cur_time_mon);
33737
33738@@ -2155,9 +2155,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33739 * overflowing the shift of read_errors by hours_since_last.
33740 */
33741 if (hours_since_last >= 8 * sizeof(read_errors))
33742- atomic_set(&rdev->read_errors, 0);
33743+ atomic_set_unchecked(&rdev->read_errors, 0);
33744 else
33745- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33746+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33747 }
33748
33749 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33750@@ -2211,8 +2211,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33751 return;
33752
33753 check_decay_read_errors(mddev, rdev);
33754- atomic_inc(&rdev->read_errors);
33755- if (atomic_read(&rdev->read_errors) > max_read_errors) {
33756+ atomic_inc_unchecked(&rdev->read_errors);
33757+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33758 char b[BDEVNAME_SIZE];
33759 bdevname(rdev->bdev, b);
33760
33761@@ -2220,7 +2220,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33762 "md/raid10:%s: %s: Raid device exceeded "
33763 "read_error threshold [cur %d:max %d]\n",
33764 mdname(mddev), b,
33765- atomic_read(&rdev->read_errors), max_read_errors);
33766+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33767 printk(KERN_NOTICE
33768 "md/raid10:%s: %s: Failing raid device\n",
33769 mdname(mddev), b);
33770@@ -2375,7 +2375,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33771 sect +
33772 choose_data_offset(r10_bio, rdev)),
33773 bdevname(rdev->bdev, b));
33774- atomic_add(s, &rdev->corrected_errors);
33775+ atomic_add_unchecked(s, &rdev->corrected_errors);
33776 }
33777
33778 rdev_dec_pending(rdev, mddev);
33779diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33780index 04348d7..62a4b9b 100644
33781--- a/drivers/md/raid5.c
33782+++ b/drivers/md/raid5.c
33783@@ -1736,19 +1736,19 @@ static void raid5_end_read_request(struct bio * bi, int error)
33784 mdname(conf->mddev), STRIPE_SECTORS,
33785 (unsigned long long)s,
33786 bdevname(rdev->bdev, b));
33787- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33788+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33789 clear_bit(R5_ReadError, &sh->dev[i].flags);
33790 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33791 }
33792- if (atomic_read(&rdev->read_errors))
33793- atomic_set(&rdev->read_errors, 0);
33794+ if (atomic_read_unchecked(&rdev->read_errors))
33795+ atomic_set_unchecked(&rdev->read_errors, 0);
33796 } else {
33797 const char *bdn = bdevname(rdev->bdev, b);
33798 int retry = 0;
33799 int set_bad = 0;
33800
33801 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33802- atomic_inc(&rdev->read_errors);
33803+ atomic_inc_unchecked(&rdev->read_errors);
33804 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33805 printk_ratelimited(
33806 KERN_WARNING
33807@@ -1776,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33808 mdname(conf->mddev),
33809 (unsigned long long)s,
33810 bdn);
33811- } else if (atomic_read(&rdev->read_errors)
33812+ } else if (atomic_read_unchecked(&rdev->read_errors)
33813 > conf->max_nr_stripes)
33814 printk(KERN_WARNING
33815 "md/raid:%s: Too many read errors, failing device %s.\n",
33816diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33817index 131b938..8572ed1 100644
33818--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33819+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33820@@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
33821 .subvendor = _subvend, .subdevice = _subdev, \
33822 .driver_data = (unsigned long)&_driverdata }
33823
33824-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33825+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33826 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33827 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33828 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33829diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33830index fa7188a..04a045e 100644
33831--- a/drivers/media/dvb/dvb-core/dvb_demux.h
33832+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33833@@ -73,7 +73,7 @@ struct dvb_demux_feed {
33834 union {
33835 dmx_ts_cb ts;
33836 dmx_section_cb sec;
33837- } cb;
33838+ } __no_const cb;
33839
33840 struct dvb_demux *demux;
33841 void *priv;
33842diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33843index 39eab73..60033e7 100644
33844--- a/drivers/media/dvb/dvb-core/dvbdev.c
33845+++ b/drivers/media/dvb/dvb-core/dvbdev.c
33846@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33847 const struct dvb_device *template, void *priv, int type)
33848 {
33849 struct dvb_device *dvbdev;
33850- struct file_operations *dvbdevfops;
33851+ file_operations_no_const *dvbdevfops;
33852 struct device *clsdev;
33853 int minor;
33854 int id;
33855diff --git a/drivers/media/dvb/dvb-usb/az6007.c b/drivers/media/dvb/dvb-usb/az6007.c
33856index 4008b9c..ce714f5 100644
33857--- a/drivers/media/dvb/dvb-usb/az6007.c
33858+++ b/drivers/media/dvb/dvb-usb/az6007.c
33859@@ -590,7 +590,7 @@ static int az6007_read_mac_addr(struct dvb_usb_device *d, u8 mac[6])
33860 int ret;
33861
33862 ret = az6007_read(d, AZ6007_READ_DATA, 6, 0, st->data, 6);
33863- memcpy(mac, st->data, sizeof(mac));
33864+ memcpy(mac, st->data, 6);
33865
33866 if (ret > 0)
33867 deb_info("%s: mac is %02x:%02x:%02x:%02x:%02x:%02x\n",
33868diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33869index 3940bb0..fb3952a 100644
33870--- a/drivers/media/dvb/dvb-usb/cxusb.c
33871+++ b/drivers/media/dvb/dvb-usb/cxusb.c
33872@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33873
33874 struct dib0700_adapter_state {
33875 int (*set_param_save) (struct dvb_frontend *);
33876-};
33877+} __no_const;
33878
33879 static int dib7070_set_param_override(struct dvb_frontend *fe)
33880 {
33881diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33882index 9382895..ac8093c 100644
33883--- a/drivers/media/dvb/dvb-usb/dw2102.c
33884+++ b/drivers/media/dvb/dvb-usb/dw2102.c
33885@@ -95,7 +95,7 @@ struct su3000_state {
33886
33887 struct s6x0_state {
33888 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33889-};
33890+} __no_const;
33891
33892 /* debug */
33893 static int dvb_usb_dw2102_debug;
33894diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33895index 404f63a..4796533 100644
33896--- a/drivers/media/dvb/frontends/dib3000.h
33897+++ b/drivers/media/dvb/frontends/dib3000.h
33898@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33899 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33900 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33901 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33902-};
33903+} __no_const;
33904
33905 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33906 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33907diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33908index 7539a5d..06531a6 100644
33909--- a/drivers/media/dvb/ngene/ngene-cards.c
33910+++ b/drivers/media/dvb/ngene/ngene-cards.c
33911@@ -478,7 +478,7 @@ static struct ngene_info ngene_info_m780 = {
33912
33913 /****************************************************************************/
33914
33915-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33916+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33917 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33918 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33919 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33920diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33921index 16a089f..1661b11 100644
33922--- a/drivers/media/radio/radio-cadet.c
33923+++ b/drivers/media/radio/radio-cadet.c
33924@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33925 unsigned char readbuf[RDS_BUFFER];
33926 int i = 0;
33927
33928+ if (count > RDS_BUFFER)
33929+ return -EFAULT;
33930 mutex_lock(&dev->lock);
33931 if (dev->rdsstat == 0) {
33932 dev->rdsstat = 1;
33933@@ -347,7 +349,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33934 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
33935 mutex_unlock(&dev->lock);
33936
33937- if (copy_to_user(data, readbuf, i))
33938+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
33939 return -EFAULT;
33940 return i;
33941 }
33942diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33943index 9cde353..8c6a1c3 100644
33944--- a/drivers/media/video/au0828/au0828.h
33945+++ b/drivers/media/video/au0828/au0828.h
33946@@ -191,7 +191,7 @@ struct au0828_dev {
33947
33948 /* I2C */
33949 struct i2c_adapter i2c_adap;
33950- struct i2c_algorithm i2c_algo;
33951+ i2c_algorithm_no_const i2c_algo;
33952 struct i2c_client i2c_client;
33953 u32 i2c_rc;
33954
33955diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33956index 04bf662..e0ac026 100644
33957--- a/drivers/media/video/cx88/cx88-alsa.c
33958+++ b/drivers/media/video/cx88/cx88-alsa.c
33959@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33960 * Only boards with eeprom and byte 1 at eeprom=1 have it
33961 */
33962
33963-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33964+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33965 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33966 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33967 {0, }
33968diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33969index 88cf9d9..bbc4b2c 100644
33970--- a/drivers/media/video/omap/omap_vout.c
33971+++ b/drivers/media/video/omap/omap_vout.c
33972@@ -64,7 +64,6 @@ enum omap_vout_channels {
33973 OMAP_VIDEO2,
33974 };
33975
33976-static struct videobuf_queue_ops video_vbq_ops;
33977 /* Variables configurable through module params*/
33978 static u32 video1_numbuffers = 3;
33979 static u32 video2_numbuffers = 3;
33980@@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
33981 {
33982 struct videobuf_queue *q;
33983 struct omap_vout_device *vout = NULL;
33984+ static struct videobuf_queue_ops video_vbq_ops = {
33985+ .buf_setup = omap_vout_buffer_setup,
33986+ .buf_prepare = omap_vout_buffer_prepare,
33987+ .buf_release = omap_vout_buffer_release,
33988+ .buf_queue = omap_vout_buffer_queue,
33989+ };
33990
33991 vout = video_drvdata(file);
33992 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
33993@@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
33994 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33995
33996 q = &vout->vbq;
33997- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33998- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33999- video_vbq_ops.buf_release = omap_vout_buffer_release;
34000- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
34001 spin_lock_init(&vout->vbq_lock);
34002
34003 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
34004diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34005index 036952f..80d356d 100644
34006--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34007+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34008@@ -196,7 +196,7 @@ struct pvr2_hdw {
34009
34010 /* I2C stuff */
34011 struct i2c_adapter i2c_adap;
34012- struct i2c_algorithm i2c_algo;
34013+ i2c_algorithm_no_const i2c_algo;
34014 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
34015 int i2c_cx25840_hack_state;
34016 int i2c_linked;
34017diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
34018index 02194c0..091733b 100644
34019--- a/drivers/media/video/timblogiw.c
34020+++ b/drivers/media/video/timblogiw.c
34021@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
34022
34023 /* Platform device functions */
34024
34025-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
34026+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
34027 .vidioc_querycap = timblogiw_querycap,
34028 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
34029 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
34030@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
34031 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
34032 };
34033
34034-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
34035+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
34036 .owner = THIS_MODULE,
34037 .open = timblogiw_open,
34038 .release = timblogiw_close,
34039diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
34040index d99db56..a16b959 100644
34041--- a/drivers/message/fusion/mptbase.c
34042+++ b/drivers/message/fusion/mptbase.c
34043@@ -6751,8 +6751,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
34044 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
34045 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
34046
34047+#ifdef CONFIG_GRKERNSEC_HIDESYM
34048+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
34049+#else
34050 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
34051 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
34052+#endif
34053+
34054 /*
34055 * Rounding UP to nearest 4-kB boundary here...
34056 */
34057diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
34058index 551262e..7551198 100644
34059--- a/drivers/message/fusion/mptsas.c
34060+++ b/drivers/message/fusion/mptsas.c
34061@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
34062 return 0;
34063 }
34064
34065+static inline void
34066+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34067+{
34068+ if (phy_info->port_details) {
34069+ phy_info->port_details->rphy = rphy;
34070+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34071+ ioc->name, rphy));
34072+ }
34073+
34074+ if (rphy) {
34075+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34076+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34077+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34078+ ioc->name, rphy, rphy->dev.release));
34079+ }
34080+}
34081+
34082 /* no mutex */
34083 static void
34084 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
34085@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
34086 return NULL;
34087 }
34088
34089-static inline void
34090-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34091-{
34092- if (phy_info->port_details) {
34093- phy_info->port_details->rphy = rphy;
34094- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34095- ioc->name, rphy));
34096- }
34097-
34098- if (rphy) {
34099- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34100- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34101- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34102- ioc->name, rphy, rphy->dev.release));
34103- }
34104-}
34105-
34106 static inline struct sas_port *
34107 mptsas_get_port(struct mptsas_phyinfo *phy_info)
34108 {
34109diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
34110index 0c3ced7..1fe34ec 100644
34111--- a/drivers/message/fusion/mptscsih.c
34112+++ b/drivers/message/fusion/mptscsih.c
34113@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
34114
34115 h = shost_priv(SChost);
34116
34117- if (h) {
34118- if (h->info_kbuf == NULL)
34119- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34120- return h->info_kbuf;
34121- h->info_kbuf[0] = '\0';
34122+ if (!h)
34123+ return NULL;
34124
34125- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34126- h->info_kbuf[size-1] = '\0';
34127- }
34128+ if (h->info_kbuf == NULL)
34129+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34130+ return h->info_kbuf;
34131+ h->info_kbuf[0] = '\0';
34132+
34133+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34134+ h->info_kbuf[size-1] = '\0';
34135
34136 return h->info_kbuf;
34137 }
34138diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
34139index 506c36f..b137580 100644
34140--- a/drivers/message/i2o/i2o_proc.c
34141+++ b/drivers/message/i2o/i2o_proc.c
34142@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
34143 "Array Controller Device"
34144 };
34145
34146-static char *chtostr(u8 * chars, int n)
34147-{
34148- char tmp[256];
34149- tmp[0] = 0;
34150- return strncat(tmp, (char *)chars, n);
34151-}
34152-
34153 static int i2o_report_query_status(struct seq_file *seq, int block_status,
34154 char *group)
34155 {
34156@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
34157
34158 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
34159 seq_printf(seq, "%-#8x", ddm_table.module_id);
34160- seq_printf(seq, "%-29s",
34161- chtostr(ddm_table.module_name_version, 28));
34162+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
34163 seq_printf(seq, "%9d ", ddm_table.data_size);
34164 seq_printf(seq, "%8d", ddm_table.code_size);
34165
34166@@ -927,8 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
34167
34168 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
34169 seq_printf(seq, "%-#8x", dst->module_id);
34170- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
34171- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
34172+ seq_printf(seq, "%-.28s", dst->module_name_version);
34173+ seq_printf(seq, "%-.8s", dst->date);
34174 seq_printf(seq, "%8d ", dst->module_size);
34175 seq_printf(seq, "%8d ", dst->mpb_size);
34176 seq_printf(seq, "0x%04x", dst->module_flags);
34177@@ -1259,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
34178 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
34179 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
34180 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
34181- seq_printf(seq, "Vendor info : %s\n",
34182- chtostr((u8 *) (work32 + 2), 16));
34183- seq_printf(seq, "Product info : %s\n",
34184- chtostr((u8 *) (work32 + 6), 16));
34185- seq_printf(seq, "Description : %s\n",
34186- chtostr((u8 *) (work32 + 10), 16));
34187- seq_printf(seq, "Product rev. : %s\n",
34188- chtostr((u8 *) (work32 + 14), 8));
34189+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
34190+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
34191+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
34192+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
34193
34194 seq_printf(seq, "Serial number : ");
34195 print_serial_number(seq, (u8 *) (work32 + 16),
34196@@ -1311,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
34197 }
34198
34199 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
34200- seq_printf(seq, "Module name : %s\n",
34201- chtostr(result.module_name, 24));
34202- seq_printf(seq, "Module revision : %s\n",
34203- chtostr(result.module_rev, 8));
34204+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
34205+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
34206
34207 seq_printf(seq, "Serial number : ");
34208 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
34209@@ -1345,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
34210 return 0;
34211 }
34212
34213- seq_printf(seq, "Device name : %s\n",
34214- chtostr(result.device_name, 64));
34215- seq_printf(seq, "Service name : %s\n",
34216- chtostr(result.service_name, 64));
34217- seq_printf(seq, "Physical name : %s\n",
34218- chtostr(result.physical_location, 64));
34219- seq_printf(seq, "Instance number : %s\n",
34220- chtostr(result.instance_number, 4));
34221+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
34222+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
34223+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
34224+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
34225
34226 return 0;
34227 }
34228diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
34229index a8c08f3..155fe3d 100644
34230--- a/drivers/message/i2o/iop.c
34231+++ b/drivers/message/i2o/iop.c
34232@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
34233
34234 spin_lock_irqsave(&c->context_list_lock, flags);
34235
34236- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
34237- atomic_inc(&c->context_list_counter);
34238+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
34239+ atomic_inc_unchecked(&c->context_list_counter);
34240
34241- entry->context = atomic_read(&c->context_list_counter);
34242+ entry->context = atomic_read_unchecked(&c->context_list_counter);
34243
34244 list_add(&entry->list, &c->context_list);
34245
34246@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
34247
34248 #if BITS_PER_LONG == 64
34249 spin_lock_init(&c->context_list_lock);
34250- atomic_set(&c->context_list_counter, 0);
34251+ atomic_set_unchecked(&c->context_list_counter, 0);
34252 INIT_LIST_HEAD(&c->context_list);
34253 #endif
34254
34255diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
34256index 7ce65f4..e66e9bc 100644
34257--- a/drivers/mfd/abx500-core.c
34258+++ b/drivers/mfd/abx500-core.c
34259@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
34260
34261 struct abx500_device_entry {
34262 struct list_head list;
34263- struct abx500_ops ops;
34264+ abx500_ops_no_const ops;
34265 struct device *dev;
34266 };
34267
34268diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
34269index 2ea9998..51dabee 100644
34270--- a/drivers/mfd/janz-cmodio.c
34271+++ b/drivers/mfd/janz-cmodio.c
34272@@ -13,6 +13,7 @@
34273
34274 #include <linux/kernel.h>
34275 #include <linux/module.h>
34276+#include <linux/slab.h>
34277 #include <linux/init.h>
34278 #include <linux/pci.h>
34279 #include <linux/interrupt.h>
34280diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
34281index a981e2a..5ca0c8b 100644
34282--- a/drivers/misc/lis3lv02d/lis3lv02d.c
34283+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
34284@@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
34285 * the lid is closed. This leads to interrupts as soon as a little move
34286 * is done.
34287 */
34288- atomic_inc(&lis3->count);
34289+ atomic_inc_unchecked(&lis3->count);
34290
34291 wake_up_interruptible(&lis3->misc_wait);
34292 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
34293@@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34294 if (lis3->pm_dev)
34295 pm_runtime_get_sync(lis3->pm_dev);
34296
34297- atomic_set(&lis3->count, 0);
34298+ atomic_set_unchecked(&lis3->count, 0);
34299 return 0;
34300 }
34301
34302@@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34303 add_wait_queue(&lis3->misc_wait, &wait);
34304 while (true) {
34305 set_current_state(TASK_INTERRUPTIBLE);
34306- data = atomic_xchg(&lis3->count, 0);
34307+ data = atomic_xchg_unchecked(&lis3->count, 0);
34308 if (data)
34309 break;
34310
34311@@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34312 struct lis3lv02d, miscdev);
34313
34314 poll_wait(file, &lis3->misc_wait, wait);
34315- if (atomic_read(&lis3->count))
34316+ if (atomic_read_unchecked(&lis3->count))
34317 return POLLIN | POLLRDNORM;
34318 return 0;
34319 }
34320diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
34321index 2b1482a..5d33616 100644
34322--- a/drivers/misc/lis3lv02d/lis3lv02d.h
34323+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
34324@@ -266,7 +266,7 @@ struct lis3lv02d {
34325 struct input_polled_dev *idev; /* input device */
34326 struct platform_device *pdev; /* platform device */
34327 struct regulator_bulk_data regulators[2];
34328- atomic_t count; /* interrupt count after last read */
34329+ atomic_unchecked_t count; /* interrupt count after last read */
34330 union axis_conversion ac; /* hw -> logical axis */
34331 int mapped_btns[3];
34332
34333diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
34334index 28adefe..08aad69 100644
34335--- a/drivers/misc/lkdtm.c
34336+++ b/drivers/misc/lkdtm.c
34337@@ -477,6 +477,8 @@ static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
34338 int i, n, out;
34339
34340 buf = (char *)__get_free_page(GFP_KERNEL);
34341+ if (buf == NULL)
34342+ return -ENOMEM;
34343
34344 n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
34345 for (i = 0; i < ARRAY_SIZE(cp_type); i++)
34346diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34347index 2f30bad..c4c13d0 100644
34348--- a/drivers/misc/sgi-gru/gruhandles.c
34349+++ b/drivers/misc/sgi-gru/gruhandles.c
34350@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34351 unsigned long nsec;
34352
34353 nsec = CLKS2NSEC(clks);
34354- atomic_long_inc(&mcs_op_statistics[op].count);
34355- atomic_long_add(nsec, &mcs_op_statistics[op].total);
34356+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34357+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34358 if (mcs_op_statistics[op].max < nsec)
34359 mcs_op_statistics[op].max = nsec;
34360 }
34361diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34362index 950dbe9..eeef0f8 100644
34363--- a/drivers/misc/sgi-gru/gruprocfs.c
34364+++ b/drivers/misc/sgi-gru/gruprocfs.c
34365@@ -32,9 +32,9 @@
34366
34367 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34368
34369-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34370+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34371 {
34372- unsigned long val = atomic_long_read(v);
34373+ unsigned long val = atomic_long_read_unchecked(v);
34374
34375 seq_printf(s, "%16lu %s\n", val, id);
34376 }
34377@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34378
34379 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34380 for (op = 0; op < mcsop_last; op++) {
34381- count = atomic_long_read(&mcs_op_statistics[op].count);
34382- total = atomic_long_read(&mcs_op_statistics[op].total);
34383+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34384+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34385 max = mcs_op_statistics[op].max;
34386 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34387 count ? total / count : 0, max);
34388diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34389index 5c3ce24..4915ccb 100644
34390--- a/drivers/misc/sgi-gru/grutables.h
34391+++ b/drivers/misc/sgi-gru/grutables.h
34392@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34393 * GRU statistics.
34394 */
34395 struct gru_stats_s {
34396- atomic_long_t vdata_alloc;
34397- atomic_long_t vdata_free;
34398- atomic_long_t gts_alloc;
34399- atomic_long_t gts_free;
34400- atomic_long_t gms_alloc;
34401- atomic_long_t gms_free;
34402- atomic_long_t gts_double_allocate;
34403- atomic_long_t assign_context;
34404- atomic_long_t assign_context_failed;
34405- atomic_long_t free_context;
34406- atomic_long_t load_user_context;
34407- atomic_long_t load_kernel_context;
34408- atomic_long_t lock_kernel_context;
34409- atomic_long_t unlock_kernel_context;
34410- atomic_long_t steal_user_context;
34411- atomic_long_t steal_kernel_context;
34412- atomic_long_t steal_context_failed;
34413- atomic_long_t nopfn;
34414- atomic_long_t asid_new;
34415- atomic_long_t asid_next;
34416- atomic_long_t asid_wrap;
34417- atomic_long_t asid_reuse;
34418- atomic_long_t intr;
34419- atomic_long_t intr_cbr;
34420- atomic_long_t intr_tfh;
34421- atomic_long_t intr_spurious;
34422- atomic_long_t intr_mm_lock_failed;
34423- atomic_long_t call_os;
34424- atomic_long_t call_os_wait_queue;
34425- atomic_long_t user_flush_tlb;
34426- atomic_long_t user_unload_context;
34427- atomic_long_t user_exception;
34428- atomic_long_t set_context_option;
34429- atomic_long_t check_context_retarget_intr;
34430- atomic_long_t check_context_unload;
34431- atomic_long_t tlb_dropin;
34432- atomic_long_t tlb_preload_page;
34433- atomic_long_t tlb_dropin_fail_no_asid;
34434- atomic_long_t tlb_dropin_fail_upm;
34435- atomic_long_t tlb_dropin_fail_invalid;
34436- atomic_long_t tlb_dropin_fail_range_active;
34437- atomic_long_t tlb_dropin_fail_idle;
34438- atomic_long_t tlb_dropin_fail_fmm;
34439- atomic_long_t tlb_dropin_fail_no_exception;
34440- atomic_long_t tfh_stale_on_fault;
34441- atomic_long_t mmu_invalidate_range;
34442- atomic_long_t mmu_invalidate_page;
34443- atomic_long_t flush_tlb;
34444- atomic_long_t flush_tlb_gru;
34445- atomic_long_t flush_tlb_gru_tgh;
34446- atomic_long_t flush_tlb_gru_zero_asid;
34447+ atomic_long_unchecked_t vdata_alloc;
34448+ atomic_long_unchecked_t vdata_free;
34449+ atomic_long_unchecked_t gts_alloc;
34450+ atomic_long_unchecked_t gts_free;
34451+ atomic_long_unchecked_t gms_alloc;
34452+ atomic_long_unchecked_t gms_free;
34453+ atomic_long_unchecked_t gts_double_allocate;
34454+ atomic_long_unchecked_t assign_context;
34455+ atomic_long_unchecked_t assign_context_failed;
34456+ atomic_long_unchecked_t free_context;
34457+ atomic_long_unchecked_t load_user_context;
34458+ atomic_long_unchecked_t load_kernel_context;
34459+ atomic_long_unchecked_t lock_kernel_context;
34460+ atomic_long_unchecked_t unlock_kernel_context;
34461+ atomic_long_unchecked_t steal_user_context;
34462+ atomic_long_unchecked_t steal_kernel_context;
34463+ atomic_long_unchecked_t steal_context_failed;
34464+ atomic_long_unchecked_t nopfn;
34465+ atomic_long_unchecked_t asid_new;
34466+ atomic_long_unchecked_t asid_next;
34467+ atomic_long_unchecked_t asid_wrap;
34468+ atomic_long_unchecked_t asid_reuse;
34469+ atomic_long_unchecked_t intr;
34470+ atomic_long_unchecked_t intr_cbr;
34471+ atomic_long_unchecked_t intr_tfh;
34472+ atomic_long_unchecked_t intr_spurious;
34473+ atomic_long_unchecked_t intr_mm_lock_failed;
34474+ atomic_long_unchecked_t call_os;
34475+ atomic_long_unchecked_t call_os_wait_queue;
34476+ atomic_long_unchecked_t user_flush_tlb;
34477+ atomic_long_unchecked_t user_unload_context;
34478+ atomic_long_unchecked_t user_exception;
34479+ atomic_long_unchecked_t set_context_option;
34480+ atomic_long_unchecked_t check_context_retarget_intr;
34481+ atomic_long_unchecked_t check_context_unload;
34482+ atomic_long_unchecked_t tlb_dropin;
34483+ atomic_long_unchecked_t tlb_preload_page;
34484+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34485+ atomic_long_unchecked_t tlb_dropin_fail_upm;
34486+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
34487+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
34488+ atomic_long_unchecked_t tlb_dropin_fail_idle;
34489+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
34490+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34491+ atomic_long_unchecked_t tfh_stale_on_fault;
34492+ atomic_long_unchecked_t mmu_invalidate_range;
34493+ atomic_long_unchecked_t mmu_invalidate_page;
34494+ atomic_long_unchecked_t flush_tlb;
34495+ atomic_long_unchecked_t flush_tlb_gru;
34496+ atomic_long_unchecked_t flush_tlb_gru_tgh;
34497+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34498
34499- atomic_long_t copy_gpa;
34500- atomic_long_t read_gpa;
34501+ atomic_long_unchecked_t copy_gpa;
34502+ atomic_long_unchecked_t read_gpa;
34503
34504- atomic_long_t mesq_receive;
34505- atomic_long_t mesq_receive_none;
34506- atomic_long_t mesq_send;
34507- atomic_long_t mesq_send_failed;
34508- atomic_long_t mesq_noop;
34509- atomic_long_t mesq_send_unexpected_error;
34510- atomic_long_t mesq_send_lb_overflow;
34511- atomic_long_t mesq_send_qlimit_reached;
34512- atomic_long_t mesq_send_amo_nacked;
34513- atomic_long_t mesq_send_put_nacked;
34514- atomic_long_t mesq_page_overflow;
34515- atomic_long_t mesq_qf_locked;
34516- atomic_long_t mesq_qf_noop_not_full;
34517- atomic_long_t mesq_qf_switch_head_failed;
34518- atomic_long_t mesq_qf_unexpected_error;
34519- atomic_long_t mesq_noop_unexpected_error;
34520- atomic_long_t mesq_noop_lb_overflow;
34521- atomic_long_t mesq_noop_qlimit_reached;
34522- atomic_long_t mesq_noop_amo_nacked;
34523- atomic_long_t mesq_noop_put_nacked;
34524- atomic_long_t mesq_noop_page_overflow;
34525+ atomic_long_unchecked_t mesq_receive;
34526+ atomic_long_unchecked_t mesq_receive_none;
34527+ atomic_long_unchecked_t mesq_send;
34528+ atomic_long_unchecked_t mesq_send_failed;
34529+ atomic_long_unchecked_t mesq_noop;
34530+ atomic_long_unchecked_t mesq_send_unexpected_error;
34531+ atomic_long_unchecked_t mesq_send_lb_overflow;
34532+ atomic_long_unchecked_t mesq_send_qlimit_reached;
34533+ atomic_long_unchecked_t mesq_send_amo_nacked;
34534+ atomic_long_unchecked_t mesq_send_put_nacked;
34535+ atomic_long_unchecked_t mesq_page_overflow;
34536+ atomic_long_unchecked_t mesq_qf_locked;
34537+ atomic_long_unchecked_t mesq_qf_noop_not_full;
34538+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
34539+ atomic_long_unchecked_t mesq_qf_unexpected_error;
34540+ atomic_long_unchecked_t mesq_noop_unexpected_error;
34541+ atomic_long_unchecked_t mesq_noop_lb_overflow;
34542+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
34543+ atomic_long_unchecked_t mesq_noop_amo_nacked;
34544+ atomic_long_unchecked_t mesq_noop_put_nacked;
34545+ atomic_long_unchecked_t mesq_noop_page_overflow;
34546
34547 };
34548
34549@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34550 tghop_invalidate, mcsop_last};
34551
34552 struct mcs_op_statistic {
34553- atomic_long_t count;
34554- atomic_long_t total;
34555+ atomic_long_unchecked_t count;
34556+ atomic_long_unchecked_t total;
34557 unsigned long max;
34558 };
34559
34560@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34561
34562 #define STAT(id) do { \
34563 if (gru_options & OPT_STATS) \
34564- atomic_long_inc(&gru_stats.id); \
34565+ atomic_long_inc_unchecked(&gru_stats.id); \
34566 } while (0)
34567
34568 #ifdef CONFIG_SGI_GRU_DEBUG
34569diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34570index c862cd4..0d176fe 100644
34571--- a/drivers/misc/sgi-xp/xp.h
34572+++ b/drivers/misc/sgi-xp/xp.h
34573@@ -288,7 +288,7 @@ struct xpc_interface {
34574 xpc_notify_func, void *);
34575 void (*received) (short, int, void *);
34576 enum xp_retval (*partid_to_nasids) (short, void *);
34577-};
34578+} __no_const;
34579
34580 extern struct xpc_interface xpc_interface;
34581
34582diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34583index b94d5f7..7f494c5 100644
34584--- a/drivers/misc/sgi-xp/xpc.h
34585+++ b/drivers/misc/sgi-xp/xpc.h
34586@@ -835,6 +835,7 @@ struct xpc_arch_operations {
34587 void (*received_payload) (struct xpc_channel *, void *);
34588 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34589 };
34590+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34591
34592 /* struct xpc_partition act_state values (for XPC HB) */
34593
34594@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34595 /* found in xpc_main.c */
34596 extern struct device *xpc_part;
34597 extern struct device *xpc_chan;
34598-extern struct xpc_arch_operations xpc_arch_ops;
34599+extern xpc_arch_operations_no_const xpc_arch_ops;
34600 extern int xpc_disengage_timelimit;
34601 extern int xpc_disengage_timedout;
34602 extern int xpc_activate_IRQ_rcvd;
34603diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34604index 8d082b4..aa749ae 100644
34605--- a/drivers/misc/sgi-xp/xpc_main.c
34606+++ b/drivers/misc/sgi-xp/xpc_main.c
34607@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34608 .notifier_call = xpc_system_die,
34609 };
34610
34611-struct xpc_arch_operations xpc_arch_ops;
34612+xpc_arch_operations_no_const xpc_arch_ops;
34613
34614 /*
34615 * Timer function to enforce the timelimit on the partition disengage.
34616diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
34617index 2b62232..acfaeeb 100644
34618--- a/drivers/misc/ti-st/st_core.c
34619+++ b/drivers/misc/ti-st/st_core.c
34620@@ -349,6 +349,11 @@ void st_int_recv(void *disc_data,
34621 st_gdata->rx_skb = alloc_skb(
34622 st_gdata->list[type]->max_frame_size,
34623 GFP_ATOMIC);
34624+ if (st_gdata->rx_skb == NULL) {
34625+ pr_err("out of memory: dropping\n");
34626+ goto done;
34627+ }
34628+
34629 skb_reserve(st_gdata->rx_skb,
34630 st_gdata->list[type]->reserve);
34631 /* next 2 required for BT only */
34632diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34633index 504da71..9722d43 100644
34634--- a/drivers/mmc/host/sdhci-pci.c
34635+++ b/drivers/mmc/host/sdhci-pci.c
34636@@ -653,7 +653,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34637 .probe = via_probe,
34638 };
34639
34640-static const struct pci_device_id pci_ids[] __devinitdata = {
34641+static const struct pci_device_id pci_ids[] __devinitconst = {
34642 {
34643 .vendor = PCI_VENDOR_ID_RICOH,
34644 .device = PCI_DEVICE_ID_RICOH_R5C822,
34645diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34646index a4eb8b5..8c0628f 100644
34647--- a/drivers/mtd/devices/doc2000.c
34648+++ b/drivers/mtd/devices/doc2000.c
34649@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34650
34651 /* The ECC will not be calculated correctly if less than 512 is written */
34652 /* DBB-
34653- if (len != 0x200 && eccbuf)
34654+ if (len != 0x200)
34655 printk(KERN_WARNING
34656 "ECC needs a full sector write (adr: %lx size %lx)\n",
34657 (long) to, (long) len);
34658diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34659index 0650aaf..7718762 100644
34660--- a/drivers/mtd/nand/denali.c
34661+++ b/drivers/mtd/nand/denali.c
34662@@ -26,6 +26,7 @@
34663 #include <linux/pci.h>
34664 #include <linux/mtd/mtd.h>
34665 #include <linux/module.h>
34666+#include <linux/slab.h>
34667
34668 #include "denali.h"
34669
34670diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34671index 51b9d6a..52af9a7 100644
34672--- a/drivers/mtd/nftlmount.c
34673+++ b/drivers/mtd/nftlmount.c
34674@@ -24,6 +24,7 @@
34675 #include <asm/errno.h>
34676 #include <linux/delay.h>
34677 #include <linux/slab.h>
34678+#include <linux/sched.h>
34679 #include <linux/mtd/mtd.h>
34680 #include <linux/mtd/nand.h>
34681 #include <linux/mtd/nftl.h>
34682diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34683index 6762dc4..9956862 100644
34684--- a/drivers/net/ethernet/atheros/atlx/atl2.c
34685+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34686@@ -2859,7 +2859,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34687 */
34688
34689 #define ATL2_PARAM(X, desc) \
34690- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34691+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34692 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34693 MODULE_PARM_DESC(X, desc);
34694 #else
34695diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34696index efd80bd..21fcff0 100644
34697--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34698+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34699@@ -487,7 +487,7 @@ struct bnx2x_rx_mode_obj {
34700
34701 int (*wait_comp)(struct bnx2x *bp,
34702 struct bnx2x_rx_mode_ramrod_params *p);
34703-};
34704+} __no_const;
34705
34706 /********************** Set multicast group ***********************************/
34707
34708diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34709index 93865f8..5448741 100644
34710--- a/drivers/net/ethernet/broadcom/tg3.h
34711+++ b/drivers/net/ethernet/broadcom/tg3.h
34712@@ -140,6 +140,7 @@
34713 #define CHIPREV_ID_5750_A0 0x4000
34714 #define CHIPREV_ID_5750_A1 0x4001
34715 #define CHIPREV_ID_5750_A3 0x4003
34716+#define CHIPREV_ID_5750_C1 0x4201
34717 #define CHIPREV_ID_5750_C2 0x4202
34718 #define CHIPREV_ID_5752_A0_HW 0x5000
34719 #define CHIPREV_ID_5752_A0 0x6000
34720diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34721index c4e8643..0979484 100644
34722--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34723+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34724@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34725 */
34726 struct l2t_skb_cb {
34727 arp_failure_handler_func arp_failure_handler;
34728-};
34729+} __no_const;
34730
34731 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34732
34733diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34734index d3cd489..0fd52dd 100644
34735--- a/drivers/net/ethernet/dec/tulip/de4x5.c
34736+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34737@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34738 for (i=0; i<ETH_ALEN; i++) {
34739 tmp.addr[i] = dev->dev_addr[i];
34740 }
34741- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34742+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34743 break;
34744
34745 case DE4X5_SET_HWADDR: /* Set the hardware address */
34746@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34747 spin_lock_irqsave(&lp->lock, flags);
34748 memcpy(&statbuf, &lp->pktStats, ioc->len);
34749 spin_unlock_irqrestore(&lp->lock, flags);
34750- if (copy_to_user(ioc->data, &statbuf, ioc->len))
34751+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34752 return -EFAULT;
34753 break;
34754 }
34755diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34756index ed7d1dc..d426748 100644
34757--- a/drivers/net/ethernet/dec/tulip/eeprom.c
34758+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34759@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34760 {NULL}};
34761
34762
34763-static const char *block_name[] __devinitdata = {
34764+static const char *block_name[] __devinitconst = {
34765 "21140 non-MII",
34766 "21140 MII PHY",
34767 "21142 Serial PHY",
34768diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
34769index 75d45f8..3d9c55b 100644
34770--- a/drivers/net/ethernet/dec/tulip/uli526x.c
34771+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
34772@@ -129,7 +129,7 @@ struct uli526x_board_info {
34773 struct uli_phy_ops {
34774 void (*write)(struct uli526x_board_info *, u8, u8, u16);
34775 u16 (*read)(struct uli526x_board_info *, u8, u8);
34776- } phy;
34777+ } __no_const phy;
34778 struct net_device *next_dev; /* next device */
34779 struct pci_dev *pdev; /* PCI device */
34780 spinlock_t lock;
34781diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34782index 4d1ffca..7c1ec4d 100644
34783--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34784+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34785@@ -236,7 +236,7 @@ struct pci_id_info {
34786 int drv_flags; /* Driver use, intended as capability flags. */
34787 };
34788
34789-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34790+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34791 { /* Sometime a Level-One switch card. */
34792 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34793 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34794diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34795index d7bb52a..3b83588 100644
34796--- a/drivers/net/ethernet/dlink/sundance.c
34797+++ b/drivers/net/ethernet/dlink/sundance.c
34798@@ -218,7 +218,7 @@ enum {
34799 struct pci_id_info {
34800 const char *name;
34801 };
34802-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34803+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34804 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34805 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34806 {"D-Link DFE-580TX 4 port Server Adapter"},
34807diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34808index bd5cf7e..c165651 100644
34809--- a/drivers/net/ethernet/emulex/benet/be_main.c
34810+++ b/drivers/net/ethernet/emulex/benet/be_main.c
34811@@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34812
34813 if (wrapped)
34814 newacc += 65536;
34815- ACCESS_ONCE(*acc) = newacc;
34816+ ACCESS_ONCE_RW(*acc) = newacc;
34817 }
34818
34819 void be_parse_stats(struct be_adapter *adapter)
34820diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34821index 16b0704..d2c07d7 100644
34822--- a/drivers/net/ethernet/faraday/ftgmac100.c
34823+++ b/drivers/net/ethernet/faraday/ftgmac100.c
34824@@ -31,6 +31,8 @@
34825 #include <linux/netdevice.h>
34826 #include <linux/phy.h>
34827 #include <linux/platform_device.h>
34828+#include <linux/interrupt.h>
34829+#include <linux/irqreturn.h>
34830 #include <net/ip.h>
34831
34832 #include "ftgmac100.h"
34833diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34834index 829b109..4ae5f6a 100644
34835--- a/drivers/net/ethernet/faraday/ftmac100.c
34836+++ b/drivers/net/ethernet/faraday/ftmac100.c
34837@@ -31,6 +31,8 @@
34838 #include <linux/module.h>
34839 #include <linux/netdevice.h>
34840 #include <linux/platform_device.h>
34841+#include <linux/interrupt.h>
34842+#include <linux/irqreturn.h>
34843
34844 #include "ftmac100.h"
34845
34846diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34847index 9d71c9c..0e4a0ac 100644
34848--- a/drivers/net/ethernet/fealnx.c
34849+++ b/drivers/net/ethernet/fealnx.c
34850@@ -150,7 +150,7 @@ struct chip_info {
34851 int flags;
34852 };
34853
34854-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34855+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34856 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34857 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34858 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34859diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
34860index fa47b85..246edeb 100644
34861--- a/drivers/net/ethernet/intel/e1000e/e1000.h
34862+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
34863@@ -181,7 +181,7 @@ struct e1000_info;
34864 #define E1000_TXDCTL_DMA_BURST_ENABLE \
34865 (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
34866 E1000_TXDCTL_COUNT_DESC | \
34867- (5 << 16) | /* wthresh must be +1 more than desired */\
34868+ (1 << 16) | /* wthresh must be +1 more than desired */\
34869 (1 << 8) | /* hthresh */ \
34870 0x1f) /* pthresh */
34871
34872diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34873index ed5b409..ec37828 100644
34874--- a/drivers/net/ethernet/intel/e1000e/hw.h
34875+++ b/drivers/net/ethernet/intel/e1000e/hw.h
34876@@ -797,6 +797,7 @@ struct e1000_mac_operations {
34877 void (*rar_set)(struct e1000_hw *, u8 *, u32);
34878 s32 (*read_mac_addr)(struct e1000_hw *);
34879 };
34880+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34881
34882 /*
34883 * When to use various PHY register access functions:
34884@@ -837,6 +838,7 @@ struct e1000_phy_operations {
34885 void (*power_up)(struct e1000_hw *);
34886 void (*power_down)(struct e1000_hw *);
34887 };
34888+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34889
34890 /* Function pointers for the NVM. */
34891 struct e1000_nvm_operations {
34892@@ -849,9 +851,10 @@ struct e1000_nvm_operations {
34893 s32 (*validate)(struct e1000_hw *);
34894 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34895 };
34896+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34897
34898 struct e1000_mac_info {
34899- struct e1000_mac_operations ops;
34900+ e1000_mac_operations_no_const ops;
34901 u8 addr[ETH_ALEN];
34902 u8 perm_addr[ETH_ALEN];
34903
34904@@ -892,7 +895,7 @@ struct e1000_mac_info {
34905 };
34906
34907 struct e1000_phy_info {
34908- struct e1000_phy_operations ops;
34909+ e1000_phy_operations_no_const ops;
34910
34911 enum e1000_phy_type type;
34912
34913@@ -926,7 +929,7 @@ struct e1000_phy_info {
34914 };
34915
34916 struct e1000_nvm_info {
34917- struct e1000_nvm_operations ops;
34918+ e1000_nvm_operations_no_const ops;
34919
34920 enum e1000_nvm_type type;
34921 enum e1000_nvm_override override;
34922diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34923index c2a51dc..c2bd262 100644
34924--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34925+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34926@@ -327,6 +327,7 @@ struct e1000_mac_operations {
34927 void (*release_swfw_sync)(struct e1000_hw *, u16);
34928
34929 };
34930+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34931
34932 struct e1000_phy_operations {
34933 s32 (*acquire)(struct e1000_hw *);
34934@@ -343,6 +344,7 @@ struct e1000_phy_operations {
34935 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34936 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34937 };
34938+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34939
34940 struct e1000_nvm_operations {
34941 s32 (*acquire)(struct e1000_hw *);
34942@@ -353,6 +355,7 @@ struct e1000_nvm_operations {
34943 s32 (*validate)(struct e1000_hw *);
34944 s32 (*valid_led_default)(struct e1000_hw *, u16 *);
34945 };
34946+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34947
34948 struct e1000_info {
34949 s32 (*get_invariants)(struct e1000_hw *);
34950@@ -364,7 +367,7 @@ struct e1000_info {
34951 extern const struct e1000_info e1000_82575_info;
34952
34953 struct e1000_mac_info {
34954- struct e1000_mac_operations ops;
34955+ e1000_mac_operations_no_const ops;
34956
34957 u8 addr[6];
34958 u8 perm_addr[6];
34959@@ -402,7 +405,7 @@ struct e1000_mac_info {
34960 };
34961
34962 struct e1000_phy_info {
34963- struct e1000_phy_operations ops;
34964+ e1000_phy_operations_no_const ops;
34965
34966 enum e1000_phy_type type;
34967
34968@@ -437,7 +440,7 @@ struct e1000_phy_info {
34969 };
34970
34971 struct e1000_nvm_info {
34972- struct e1000_nvm_operations ops;
34973+ e1000_nvm_operations_no_const ops;
34974 enum e1000_nvm_type type;
34975 enum e1000_nvm_override override;
34976
34977@@ -482,6 +485,7 @@ struct e1000_mbx_operations {
34978 s32 (*check_for_ack)(struct e1000_hw *, u16);
34979 s32 (*check_for_rst)(struct e1000_hw *, u16);
34980 };
34981+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34982
34983 struct e1000_mbx_stats {
34984 u32 msgs_tx;
34985@@ -493,7 +497,7 @@ struct e1000_mbx_stats {
34986 };
34987
34988 struct e1000_mbx_info {
34989- struct e1000_mbx_operations ops;
34990+ e1000_mbx_operations_no_const ops;
34991 struct e1000_mbx_stats stats;
34992 u32 timeout;
34993 u32 usec_delay;
34994diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34995index 57db3c6..aa825fc 100644
34996--- a/drivers/net/ethernet/intel/igbvf/vf.h
34997+++ b/drivers/net/ethernet/intel/igbvf/vf.h
34998@@ -189,9 +189,10 @@ struct e1000_mac_operations {
34999 s32 (*read_mac_addr)(struct e1000_hw *);
35000 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
35001 };
35002+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35003
35004 struct e1000_mac_info {
35005- struct e1000_mac_operations ops;
35006+ e1000_mac_operations_no_const ops;
35007 u8 addr[6];
35008 u8 perm_addr[6];
35009
35010@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
35011 s32 (*check_for_ack)(struct e1000_hw *);
35012 s32 (*check_for_rst)(struct e1000_hw *);
35013 };
35014+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
35015
35016 struct e1000_mbx_stats {
35017 u32 msgs_tx;
35018@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
35019 };
35020
35021 struct e1000_mbx_info {
35022- struct e1000_mbx_operations ops;
35023+ e1000_mbx_operations_no_const ops;
35024 struct e1000_mbx_stats stats;
35025 u32 timeout;
35026 u32 usec_delay;
35027diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
35028index dcebd12..c1fe8be 100644
35029--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
35030+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
35031@@ -805,7 +805,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
35032 /* store the new cycle speed */
35033 adapter->cycle_speed = cycle_speed;
35034
35035- ACCESS_ONCE(adapter->base_incval) = incval;
35036+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
35037 smp_mb();
35038
35039 /* grab the ptp lock */
35040diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35041index 204848d..d8aeaec 100644
35042--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35043+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35044@@ -2791,6 +2791,7 @@ struct ixgbe_eeprom_operations {
35045 s32 (*update_checksum)(struct ixgbe_hw *);
35046 u16 (*calc_checksum)(struct ixgbe_hw *);
35047 };
35048+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
35049
35050 struct ixgbe_mac_operations {
35051 s32 (*init_hw)(struct ixgbe_hw *);
35052@@ -2856,6 +2857,7 @@ struct ixgbe_mac_operations {
35053 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
35054 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
35055 };
35056+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35057
35058 struct ixgbe_phy_operations {
35059 s32 (*identify)(struct ixgbe_hw *);
35060@@ -2875,9 +2877,10 @@ struct ixgbe_phy_operations {
35061 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
35062 s32 (*check_overtemp)(struct ixgbe_hw *);
35063 };
35064+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
35065
35066 struct ixgbe_eeprom_info {
35067- struct ixgbe_eeprom_operations ops;
35068+ ixgbe_eeprom_operations_no_const ops;
35069 enum ixgbe_eeprom_type type;
35070 u32 semaphore_delay;
35071 u16 word_size;
35072@@ -2887,7 +2890,7 @@ struct ixgbe_eeprom_info {
35073
35074 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
35075 struct ixgbe_mac_info {
35076- struct ixgbe_mac_operations ops;
35077+ ixgbe_mac_operations_no_const ops;
35078 enum ixgbe_mac_type type;
35079 u8 addr[ETH_ALEN];
35080 u8 perm_addr[ETH_ALEN];
35081@@ -2916,7 +2919,7 @@ struct ixgbe_mac_info {
35082 };
35083
35084 struct ixgbe_phy_info {
35085- struct ixgbe_phy_operations ops;
35086+ ixgbe_phy_operations_no_const ops;
35087 struct mdio_if_info mdio;
35088 enum ixgbe_phy_type type;
35089 u32 id;
35090@@ -2944,6 +2947,7 @@ struct ixgbe_mbx_operations {
35091 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
35092 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
35093 };
35094+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35095
35096 struct ixgbe_mbx_stats {
35097 u32 msgs_tx;
35098@@ -2955,7 +2959,7 @@ struct ixgbe_mbx_stats {
35099 };
35100
35101 struct ixgbe_mbx_info {
35102- struct ixgbe_mbx_operations ops;
35103+ ixgbe_mbx_operations_no_const ops;
35104 struct ixgbe_mbx_stats stats;
35105 u32 timeout;
35106 u32 usec_delay;
35107diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
35108index 25c951d..cc7cf33 100644
35109--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
35110+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
35111@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
35112 s32 (*clear_vfta)(struct ixgbe_hw *);
35113 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
35114 };
35115+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35116
35117 enum ixgbe_mac_type {
35118 ixgbe_mac_unknown = 0,
35119@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
35120 };
35121
35122 struct ixgbe_mac_info {
35123- struct ixgbe_mac_operations ops;
35124+ ixgbe_mac_operations_no_const ops;
35125 u8 addr[6];
35126 u8 perm_addr[6];
35127
35128@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
35129 s32 (*check_for_ack)(struct ixgbe_hw *);
35130 s32 (*check_for_rst)(struct ixgbe_hw *);
35131 };
35132+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35133
35134 struct ixgbe_mbx_stats {
35135 u32 msgs_tx;
35136@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
35137 };
35138
35139 struct ixgbe_mbx_info {
35140- struct ixgbe_mbx_operations ops;
35141+ ixgbe_mbx_operations_no_const ops;
35142 struct ixgbe_mbx_stats stats;
35143 u32 timeout;
35144 u32 udelay;
35145diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
35146index a0313de..e83a572 100644
35147--- a/drivers/net/ethernet/mellanox/mlx4/main.c
35148+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
35149@@ -41,6 +41,7 @@
35150 #include <linux/slab.h>
35151 #include <linux/io-mapping.h>
35152 #include <linux/delay.h>
35153+#include <linux/sched.h>
35154
35155 #include <linux/mlx4/device.h>
35156 #include <linux/mlx4/doorbell.h>
35157diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35158index 5046a64..71ca936 100644
35159--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
35160+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35161@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
35162 void (*link_down)(struct __vxge_hw_device *devh);
35163 void (*crit_err)(struct __vxge_hw_device *devh,
35164 enum vxge_hw_event type, u64 ext_data);
35165-};
35166+} __no_const;
35167
35168 /*
35169 * struct __vxge_hw_blockpool_entry - Block private data structure
35170diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35171index 4a518a3..936b334 100644
35172--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35173+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35174@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
35175 struct vxge_hw_mempool_dma *dma_object,
35176 u32 index,
35177 u32 is_last);
35178-};
35179+} __no_const;
35180
35181 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
35182 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
35183diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
35184index eb81da4..1592b62 100644
35185--- a/drivers/net/ethernet/realtek/r8169.c
35186+++ b/drivers/net/ethernet/realtek/r8169.c
35187@@ -723,22 +723,22 @@ struct rtl8169_private {
35188 struct mdio_ops {
35189 void (*write)(void __iomem *, int, int);
35190 int (*read)(void __iomem *, int);
35191- } mdio_ops;
35192+ } __no_const mdio_ops;
35193
35194 struct pll_power_ops {
35195 void (*down)(struct rtl8169_private *);
35196 void (*up)(struct rtl8169_private *);
35197- } pll_power_ops;
35198+ } __no_const pll_power_ops;
35199
35200 struct jumbo_ops {
35201 void (*enable)(struct rtl8169_private *);
35202 void (*disable)(struct rtl8169_private *);
35203- } jumbo_ops;
35204+ } __no_const jumbo_ops;
35205
35206 struct csi_ops {
35207 void (*write)(void __iomem *, int, int);
35208 u32 (*read)(void __iomem *, int);
35209- } csi_ops;
35210+ } __no_const csi_ops;
35211
35212 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
35213 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
35214diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
35215index 4613591..d816601 100644
35216--- a/drivers/net/ethernet/sis/sis190.c
35217+++ b/drivers/net/ethernet/sis/sis190.c
35218@@ -1618,7 +1618,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
35219 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
35220 struct net_device *dev)
35221 {
35222- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
35223+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
35224 struct sis190_private *tp = netdev_priv(dev);
35225 struct pci_dev *isa_bridge;
35226 u8 reg, tmp8;
35227diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35228index c07cfe9..81cbf7e 100644
35229--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35230+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35231@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
35232
35233 writel(value, ioaddr + MMC_CNTRL);
35234
35235- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35236- MMC_CNTRL, value);
35237+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35238+// MMC_CNTRL, value);
35239 }
35240
35241 /* To mask all all interrupts.*/
35242diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
35243index 2857ab0..9a1f9b0 100644
35244--- a/drivers/net/hyperv/hyperv_net.h
35245+++ b/drivers/net/hyperv/hyperv_net.h
35246@@ -99,7 +99,7 @@ struct rndis_device {
35247
35248 enum rndis_device_state state;
35249 bool link_state;
35250- atomic_t new_req_id;
35251+ atomic_unchecked_t new_req_id;
35252
35253 spinlock_t request_lock;
35254 struct list_head req_list;
35255diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
35256index 981ebb1..b34959b 100644
35257--- a/drivers/net/hyperv/rndis_filter.c
35258+++ b/drivers/net/hyperv/rndis_filter.c
35259@@ -97,7 +97,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35260 * template
35261 */
35262 set = &rndis_msg->msg.set_req;
35263- set->req_id = atomic_inc_return(&dev->new_req_id);
35264+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35265
35266 /* Add to the request list */
35267 spin_lock_irqsave(&dev->request_lock, flags);
35268@@ -648,7 +648,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35269
35270 /* Setup the rndis set */
35271 halt = &request->request_msg.msg.halt_req;
35272- halt->req_id = atomic_inc_return(&dev->new_req_id);
35273+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35274
35275 /* Ignore return since this msg is optional. */
35276 rndis_filter_send_request(dev, request);
35277diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
35278index 5c05572..389610b 100644
35279--- a/drivers/net/ppp/ppp_generic.c
35280+++ b/drivers/net/ppp/ppp_generic.c
35281@@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35282 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
35283 struct ppp_stats stats;
35284 struct ppp_comp_stats cstats;
35285- char *vers;
35286
35287 switch (cmd) {
35288 case SIOCGPPPSTATS:
35289@@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35290 break;
35291
35292 case SIOCGPPPVER:
35293- vers = PPP_VERSION;
35294- if (copy_to_user(addr, vers, strlen(vers) + 1))
35295+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
35296 break;
35297 err = 0;
35298 break;
35299diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
35300index 62f30b4..ff99dfd 100644
35301--- a/drivers/net/usb/hso.c
35302+++ b/drivers/net/usb/hso.c
35303@@ -71,7 +71,7 @@
35304 #include <asm/byteorder.h>
35305 #include <linux/serial_core.h>
35306 #include <linux/serial.h>
35307-
35308+#include <asm/local.h>
35309
35310 #define MOD_AUTHOR "Option Wireless"
35311 #define MOD_DESCRIPTION "USB High Speed Option driver"
35312@@ -1182,7 +1182,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35313 struct urb *urb;
35314
35315 urb = serial->rx_urb[0];
35316- if (serial->port.count > 0) {
35317+ if (atomic_read(&serial->port.count) > 0) {
35318 count = put_rxbuf_data(urb, serial);
35319 if (count == -1)
35320 return;
35321@@ -1218,7 +1218,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35322 DUMP1(urb->transfer_buffer, urb->actual_length);
35323
35324 /* Anyone listening? */
35325- if (serial->port.count == 0)
35326+ if (atomic_read(&serial->port.count) == 0)
35327 return;
35328
35329 if (status == 0) {
35330@@ -1300,8 +1300,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35331 tty_port_tty_set(&serial->port, tty);
35332
35333 /* check for port already opened, if not set the termios */
35334- serial->port.count++;
35335- if (serial->port.count == 1) {
35336+ if (atomic_inc_return(&serial->port.count) == 1) {
35337 serial->rx_state = RX_IDLE;
35338 /* Force default termio settings */
35339 _hso_serial_set_termios(tty, NULL);
35340@@ -1313,7 +1312,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35341 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35342 if (result) {
35343 hso_stop_serial_device(serial->parent);
35344- serial->port.count--;
35345+ atomic_dec(&serial->port.count);
35346 kref_put(&serial->parent->ref, hso_serial_ref_free);
35347 }
35348 } else {
35349@@ -1350,10 +1349,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35350
35351 /* reset the rts and dtr */
35352 /* do the actual close */
35353- serial->port.count--;
35354+ atomic_dec(&serial->port.count);
35355
35356- if (serial->port.count <= 0) {
35357- serial->port.count = 0;
35358+ if (atomic_read(&serial->port.count) <= 0) {
35359+ atomic_set(&serial->port.count, 0);
35360 tty_port_tty_set(&serial->port, NULL);
35361 if (!usb_gone)
35362 hso_stop_serial_device(serial->parent);
35363@@ -1429,7 +1428,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35364
35365 /* the actual setup */
35366 spin_lock_irqsave(&serial->serial_lock, flags);
35367- if (serial->port.count)
35368+ if (atomic_read(&serial->port.count))
35369 _hso_serial_set_termios(tty, old);
35370 else
35371 tty->termios = old;
35372@@ -1888,7 +1887,7 @@ static void intr_callback(struct urb *urb)
35373 D1("Pending read interrupt on port %d\n", i);
35374 spin_lock(&serial->serial_lock);
35375 if (serial->rx_state == RX_IDLE &&
35376- serial->port.count > 0) {
35377+ atomic_read(&serial->port.count) > 0) {
35378 /* Setup and send a ctrl req read on
35379 * port i */
35380 if (!serial->rx_urb_filled[0]) {
35381@@ -3079,7 +3078,7 @@ static int hso_resume(struct usb_interface *iface)
35382 /* Start all serial ports */
35383 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35384 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35385- if (dev2ser(serial_table[i])->port.count) {
35386+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
35387 result =
35388 hso_start_serial_device(serial_table[i], GFP_NOIO);
35389 hso_kick_transmit(dev2ser(serial_table[i]));
35390diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35391index 420d69b..74f90a2 100644
35392--- a/drivers/net/wireless/ath/ath.h
35393+++ b/drivers/net/wireless/ath/ath.h
35394@@ -119,6 +119,7 @@ struct ath_ops {
35395 void (*write_flush) (void *);
35396 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35397 };
35398+typedef struct ath_ops __no_const ath_ops_no_const;
35399
35400 struct ath_common;
35401 struct ath_bus_ops;
35402diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35403index 8d78253..bebbb68 100644
35404--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35405+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35406@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35407 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35408 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35409
35410- ACCESS_ONCE(ads->ds_link) = i->link;
35411- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35412+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
35413+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35414
35415 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35416 ctl6 = SM(i->keytype, AR_EncrType);
35417@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35418
35419 if ((i->is_first || i->is_last) &&
35420 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35421- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35422+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35423 | set11nTries(i->rates, 1)
35424 | set11nTries(i->rates, 2)
35425 | set11nTries(i->rates, 3)
35426 | (i->dur_update ? AR_DurUpdateEna : 0)
35427 | SM(0, AR_BurstDur);
35428
35429- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35430+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35431 | set11nRate(i->rates, 1)
35432 | set11nRate(i->rates, 2)
35433 | set11nRate(i->rates, 3);
35434 } else {
35435- ACCESS_ONCE(ads->ds_ctl2) = 0;
35436- ACCESS_ONCE(ads->ds_ctl3) = 0;
35437+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35438+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35439 }
35440
35441 if (!i->is_first) {
35442- ACCESS_ONCE(ads->ds_ctl0) = 0;
35443- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35444- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35445+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35446+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35447+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35448 return;
35449 }
35450
35451@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35452 break;
35453 }
35454
35455- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35456+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35457 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35458 | SM(i->txpower, AR_XmitPower)
35459 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35460@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35461 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35462 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35463
35464- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35465- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35466+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35467+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35468
35469 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35470 return;
35471
35472- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35473+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35474 | set11nPktDurRTSCTS(i->rates, 1);
35475
35476- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35477+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35478 | set11nPktDurRTSCTS(i->rates, 3);
35479
35480- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35481+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35482 | set11nRateFlags(i->rates, 1)
35483 | set11nRateFlags(i->rates, 2)
35484 | set11nRateFlags(i->rates, 3)
35485diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35486index d9e0824..1a874e7 100644
35487--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35488+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35489@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35490 (i->qcu << AR_TxQcuNum_S) | desc_len;
35491
35492 checksum += val;
35493- ACCESS_ONCE(ads->info) = val;
35494+ ACCESS_ONCE_RW(ads->info) = val;
35495
35496 checksum += i->link;
35497- ACCESS_ONCE(ads->link) = i->link;
35498+ ACCESS_ONCE_RW(ads->link) = i->link;
35499
35500 checksum += i->buf_addr[0];
35501- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35502+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35503 checksum += i->buf_addr[1];
35504- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35505+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35506 checksum += i->buf_addr[2];
35507- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35508+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35509 checksum += i->buf_addr[3];
35510- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35511+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35512
35513 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35514- ACCESS_ONCE(ads->ctl3) = val;
35515+ ACCESS_ONCE_RW(ads->ctl3) = val;
35516 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35517- ACCESS_ONCE(ads->ctl5) = val;
35518+ ACCESS_ONCE_RW(ads->ctl5) = val;
35519 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35520- ACCESS_ONCE(ads->ctl7) = val;
35521+ ACCESS_ONCE_RW(ads->ctl7) = val;
35522 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35523- ACCESS_ONCE(ads->ctl9) = val;
35524+ ACCESS_ONCE_RW(ads->ctl9) = val;
35525
35526 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35527- ACCESS_ONCE(ads->ctl10) = checksum;
35528+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
35529
35530 if (i->is_first || i->is_last) {
35531- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35532+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35533 | set11nTries(i->rates, 1)
35534 | set11nTries(i->rates, 2)
35535 | set11nTries(i->rates, 3)
35536 | (i->dur_update ? AR_DurUpdateEna : 0)
35537 | SM(0, AR_BurstDur);
35538
35539- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35540+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35541 | set11nRate(i->rates, 1)
35542 | set11nRate(i->rates, 2)
35543 | set11nRate(i->rates, 3);
35544 } else {
35545- ACCESS_ONCE(ads->ctl13) = 0;
35546- ACCESS_ONCE(ads->ctl14) = 0;
35547+ ACCESS_ONCE_RW(ads->ctl13) = 0;
35548+ ACCESS_ONCE_RW(ads->ctl14) = 0;
35549 }
35550
35551 ads->ctl20 = 0;
35552@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35553
35554 ctl17 = SM(i->keytype, AR_EncrType);
35555 if (!i->is_first) {
35556- ACCESS_ONCE(ads->ctl11) = 0;
35557- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35558- ACCESS_ONCE(ads->ctl15) = 0;
35559- ACCESS_ONCE(ads->ctl16) = 0;
35560- ACCESS_ONCE(ads->ctl17) = ctl17;
35561- ACCESS_ONCE(ads->ctl18) = 0;
35562- ACCESS_ONCE(ads->ctl19) = 0;
35563+ ACCESS_ONCE_RW(ads->ctl11) = 0;
35564+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35565+ ACCESS_ONCE_RW(ads->ctl15) = 0;
35566+ ACCESS_ONCE_RW(ads->ctl16) = 0;
35567+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35568+ ACCESS_ONCE_RW(ads->ctl18) = 0;
35569+ ACCESS_ONCE_RW(ads->ctl19) = 0;
35570 return;
35571 }
35572
35573- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35574+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35575 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35576 | SM(i->txpower, AR_XmitPower)
35577 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35578@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35579 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35580 ctl12 |= SM(val, AR_PAPRDChainMask);
35581
35582- ACCESS_ONCE(ads->ctl12) = ctl12;
35583- ACCESS_ONCE(ads->ctl17) = ctl17;
35584+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35585+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35586
35587- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35588+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35589 | set11nPktDurRTSCTS(i->rates, 1);
35590
35591- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35592+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35593 | set11nPktDurRTSCTS(i->rates, 3);
35594
35595- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35596+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35597 | set11nRateFlags(i->rates, 1)
35598 | set11nRateFlags(i->rates, 2)
35599 | set11nRateFlags(i->rates, 3)
35600 | SM(i->rtscts_rate, AR_RTSCTSRate);
35601
35602- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35603+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35604 }
35605
35606 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
35607diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
35608index 02f5007..bd0bd8f 100644
35609--- a/drivers/net/wireless/ath/ath9k/hw.h
35610+++ b/drivers/net/wireless/ath/ath9k/hw.h
35611@@ -610,7 +610,7 @@ struct ath_hw_private_ops {
35612
35613 /* ANI */
35614 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35615-};
35616+} __no_const;
35617
35618 /**
35619 * struct ath_hw_ops - callbacks used by hardware code and driver code
35620@@ -640,7 +640,7 @@ struct ath_hw_ops {
35621 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35622 struct ath_hw_antcomb_conf *antconf);
35623
35624-};
35625+} __no_const;
35626
35627 struct ath_nf_limits {
35628 s16 max;
35629@@ -660,7 +660,7 @@ enum ath_cal_list {
35630 #define AH_FASTCC 0x4
35631
35632 struct ath_hw {
35633- struct ath_ops reg_ops;
35634+ ath_ops_no_const reg_ops;
35635
35636 struct ieee80211_hw *hw;
35637 struct ath_common common;
35638diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35639index af00e2c..ab04d34 100644
35640--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35641+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35642@@ -545,7 +545,7 @@ struct phy_func_ptr {
35643 void (*carrsuppr)(struct brcms_phy *);
35644 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35645 void (*detach)(struct brcms_phy *);
35646-};
35647+} __no_const;
35648
35649 struct brcms_phy {
35650 struct brcms_phy_pub pubpi_ro;
35651diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
35652index faec404..a5277f1 100644
35653--- a/drivers/net/wireless/iwlegacy/3945-mac.c
35654+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
35655@@ -3611,7 +3611,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
35656 */
35657 if (il3945_mod_params.disable_hw_scan) {
35658 D_INFO("Disabling hw_scan\n");
35659- il3945_mac_ops.hw_scan = NULL;
35660+ pax_open_kernel();
35661+ *(void **)&il3945_mac_ops.hw_scan = NULL;
35662+ pax_close_kernel();
35663 }
35664
35665 D_INFO("*** LOAD DRIVER ***\n");
35666diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
35667index a0b7cfd..20b49f7 100644
35668--- a/drivers/net/wireless/mac80211_hwsim.c
35669+++ b/drivers/net/wireless/mac80211_hwsim.c
35670@@ -1752,9 +1752,11 @@ static int __init init_mac80211_hwsim(void)
35671 return -EINVAL;
35672
35673 if (fake_hw_scan) {
35674- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35675- mac80211_hwsim_ops.sw_scan_start = NULL;
35676- mac80211_hwsim_ops.sw_scan_complete = NULL;
35677+ pax_open_kernel();
35678+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35679+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35680+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35681+ pax_close_kernel();
35682 }
35683
35684 spin_lock_init(&hwsim_radio_lock);
35685diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35686index bd3b0bf..f9db92a 100644
35687--- a/drivers/net/wireless/mwifiex/main.h
35688+++ b/drivers/net/wireless/mwifiex/main.h
35689@@ -567,7 +567,7 @@ struct mwifiex_if_ops {
35690 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
35691 int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *);
35692 int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
35693-};
35694+} __no_const;
35695
35696 struct mwifiex_adapter {
35697 u8 iface_type;
35698diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35699index dfcd02a..a42a59d 100644
35700--- a/drivers/net/wireless/rndis_wlan.c
35701+++ b/drivers/net/wireless/rndis_wlan.c
35702@@ -1235,7 +1235,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35703
35704 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35705
35706- if (rts_threshold < 0 || rts_threshold > 2347)
35707+ if (rts_threshold > 2347)
35708 rts_threshold = 2347;
35709
35710 tmp = cpu_to_le32(rts_threshold);
35711diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
35712index 8f75402..eed109d 100644
35713--- a/drivers/net/wireless/rt2x00/rt2x00.h
35714+++ b/drivers/net/wireless/rt2x00/rt2x00.h
35715@@ -396,7 +396,7 @@ struct rt2x00_intf {
35716 * for hardware which doesn't support hardware
35717 * sequence counting.
35718 */
35719- atomic_t seqno;
35720+ atomic_unchecked_t seqno;
35721 };
35722
35723 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
35724diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
35725index 2fd8301..9767e8c 100644
35726--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
35727+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
35728@@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
35729 * sequence counter given by mac80211.
35730 */
35731 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
35732- seqno = atomic_add_return(0x10, &intf->seqno);
35733+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
35734 else
35735- seqno = atomic_read(&intf->seqno);
35736+ seqno = atomic_read_unchecked(&intf->seqno);
35737
35738 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
35739 hdr->seq_ctrl |= cpu_to_le16(seqno);
35740diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
35741index 9d8f581..0f6589e 100644
35742--- a/drivers/net/wireless/ti/wl1251/wl1251.h
35743+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
35744@@ -266,7 +266,7 @@ struct wl1251_if_operations {
35745 void (*reset)(struct wl1251 *wl);
35746 void (*enable_irq)(struct wl1251 *wl);
35747 void (*disable_irq)(struct wl1251 *wl);
35748-};
35749+} __no_const;
35750
35751 struct wl1251 {
35752 struct ieee80211_hw *hw;
35753diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
35754index 0b3f0b5..62f68bd 100644
35755--- a/drivers/net/wireless/ti/wlcore/wlcore.h
35756+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
35757@@ -61,7 +61,7 @@ struct wlcore_ops {
35758 struct wl12xx_vif *wlvif);
35759 s8 (*get_pg_ver)(struct wl1271 *wl);
35760 void (*get_mac)(struct wl1271 *wl);
35761-};
35762+} __no_const;
35763
35764 enum wlcore_partitions {
35765 PART_DOWN,
35766diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35767index f34b5b2..b5abb9f 100644
35768--- a/drivers/oprofile/buffer_sync.c
35769+++ b/drivers/oprofile/buffer_sync.c
35770@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35771 if (cookie == NO_COOKIE)
35772 offset = pc;
35773 if (cookie == INVALID_COOKIE) {
35774- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35775+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35776 offset = pc;
35777 }
35778 if (cookie != last_cookie) {
35779@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35780 /* add userspace sample */
35781
35782 if (!mm) {
35783- atomic_inc(&oprofile_stats.sample_lost_no_mm);
35784+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35785 return 0;
35786 }
35787
35788 cookie = lookup_dcookie(mm, s->eip, &offset);
35789
35790 if (cookie == INVALID_COOKIE) {
35791- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35792+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35793 return 0;
35794 }
35795
35796@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35797 /* ignore backtraces if failed to add a sample */
35798 if (state == sb_bt_start) {
35799 state = sb_bt_ignore;
35800- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35801+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35802 }
35803 }
35804 release_mm(mm);
35805diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35806index c0cc4e7..44d4e54 100644
35807--- a/drivers/oprofile/event_buffer.c
35808+++ b/drivers/oprofile/event_buffer.c
35809@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35810 }
35811
35812 if (buffer_pos == buffer_size) {
35813- atomic_inc(&oprofile_stats.event_lost_overflow);
35814+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35815 return;
35816 }
35817
35818diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35819index ed2c3ec..deda85a 100644
35820--- a/drivers/oprofile/oprof.c
35821+++ b/drivers/oprofile/oprof.c
35822@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35823 if (oprofile_ops.switch_events())
35824 return;
35825
35826- atomic_inc(&oprofile_stats.multiplex_counter);
35827+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35828 start_switch_worker();
35829 }
35830
35831diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35832index 917d28e..d62d981 100644
35833--- a/drivers/oprofile/oprofile_stats.c
35834+++ b/drivers/oprofile/oprofile_stats.c
35835@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35836 cpu_buf->sample_invalid_eip = 0;
35837 }
35838
35839- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35840- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35841- atomic_set(&oprofile_stats.event_lost_overflow, 0);
35842- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35843- atomic_set(&oprofile_stats.multiplex_counter, 0);
35844+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35845+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35846+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35847+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35848+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35849 }
35850
35851
35852diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35853index 38b6fc0..b5cbfce 100644
35854--- a/drivers/oprofile/oprofile_stats.h
35855+++ b/drivers/oprofile/oprofile_stats.h
35856@@ -13,11 +13,11 @@
35857 #include <linux/atomic.h>
35858
35859 struct oprofile_stat_struct {
35860- atomic_t sample_lost_no_mm;
35861- atomic_t sample_lost_no_mapping;
35862- atomic_t bt_lost_no_mapping;
35863- atomic_t event_lost_overflow;
35864- atomic_t multiplex_counter;
35865+ atomic_unchecked_t sample_lost_no_mm;
35866+ atomic_unchecked_t sample_lost_no_mapping;
35867+ atomic_unchecked_t bt_lost_no_mapping;
35868+ atomic_unchecked_t event_lost_overflow;
35869+ atomic_unchecked_t multiplex_counter;
35870 };
35871
35872 extern struct oprofile_stat_struct oprofile_stats;
35873diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35874index 849357c..b83c1e0 100644
35875--- a/drivers/oprofile/oprofilefs.c
35876+++ b/drivers/oprofile/oprofilefs.c
35877@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
35878
35879
35880 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35881- char const *name, atomic_t *val)
35882+ char const *name, atomic_unchecked_t *val)
35883 {
35884 return __oprofilefs_create_file(sb, root, name,
35885 &atomic_ro_fops, 0444, val);
35886diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35887index 3f56bc0..707d642 100644
35888--- a/drivers/parport/procfs.c
35889+++ b/drivers/parport/procfs.c
35890@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35891
35892 *ppos += len;
35893
35894- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35895+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35896 }
35897
35898 #ifdef CONFIG_PARPORT_1284
35899@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35900
35901 *ppos += len;
35902
35903- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35904+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35905 }
35906 #endif /* IEEE1284.3 support. */
35907
35908diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35909index 9fff878..ad0ad53 100644
35910--- a/drivers/pci/hotplug/cpci_hotplug.h
35911+++ b/drivers/pci/hotplug/cpci_hotplug.h
35912@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35913 int (*hardware_test) (struct slot* slot, u32 value);
35914 u8 (*get_power) (struct slot* slot);
35915 int (*set_power) (struct slot* slot, int value);
35916-};
35917+} __no_const;
35918
35919 struct cpci_hp_controller {
35920 unsigned int irq;
35921diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35922index 76ba8a1..20ca857 100644
35923--- a/drivers/pci/hotplug/cpqphp_nvram.c
35924+++ b/drivers/pci/hotplug/cpqphp_nvram.c
35925@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35926
35927 void compaq_nvram_init (void __iomem *rom_start)
35928 {
35929+
35930+#ifndef CONFIG_PAX_KERNEXEC
35931 if (rom_start) {
35932 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35933 }
35934+#endif
35935+
35936 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35937
35938 /* initialize our int15 lock */
35939diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35940index b500840..d7159d3 100644
35941--- a/drivers/pci/pcie/aspm.c
35942+++ b/drivers/pci/pcie/aspm.c
35943@@ -27,9 +27,9 @@
35944 #define MODULE_PARAM_PREFIX "pcie_aspm."
35945
35946 /* Note: those are not register definitions */
35947-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35948-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35949-#define ASPM_STATE_L1 (4) /* L1 state */
35950+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35951+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35952+#define ASPM_STATE_L1 (4U) /* L1 state */
35953 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35954 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35955
35956diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35957index 658ac97..05e1b90 100644
35958--- a/drivers/pci/probe.c
35959+++ b/drivers/pci/probe.c
35960@@ -137,7 +137,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35961 u16 orig_cmd;
35962 struct pci_bus_region region;
35963
35964- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35965+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35966
35967 if (!dev->mmio_always_on) {
35968 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35969diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35970index 27911b5..5b6db88 100644
35971--- a/drivers/pci/proc.c
35972+++ b/drivers/pci/proc.c
35973@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
35974 static int __init pci_proc_init(void)
35975 {
35976 struct pci_dev *dev = NULL;
35977+
35978+#ifdef CONFIG_GRKERNSEC_PROC_ADD
35979+#ifdef CONFIG_GRKERNSEC_PROC_USER
35980+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
35981+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
35982+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
35983+#endif
35984+#else
35985 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
35986+#endif
35987 proc_create("devices", 0, proc_bus_pci_dir,
35988 &proc_bus_pci_dev_operations);
35989 proc_initialized = 1;
35990diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
35991index 8b5610d..a4c22bb 100644
35992--- a/drivers/platform/x86/thinkpad_acpi.c
35993+++ b/drivers/platform/x86/thinkpad_acpi.c
35994@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
35995 return 0;
35996 }
35997
35998-void static hotkey_mask_warn_incomplete_mask(void)
35999+static void hotkey_mask_warn_incomplete_mask(void)
36000 {
36001 /* log only what the user can fix... */
36002 const u32 wantedmask = hotkey_driver_mask &
36003@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
36004 }
36005 }
36006
36007-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36008- struct tp_nvram_state *newn,
36009- const u32 event_mask)
36010-{
36011-
36012 #define TPACPI_COMPARE_KEY(__scancode, __member) \
36013 do { \
36014 if ((event_mask & (1 << __scancode)) && \
36015@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36016 tpacpi_hotkey_send_key(__scancode); \
36017 } while (0)
36018
36019- void issue_volchange(const unsigned int oldvol,
36020- const unsigned int newvol)
36021- {
36022- unsigned int i = oldvol;
36023+static void issue_volchange(const unsigned int oldvol,
36024+ const unsigned int newvol,
36025+ const u32 event_mask)
36026+{
36027+ unsigned int i = oldvol;
36028
36029- while (i > newvol) {
36030- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36031- i--;
36032- }
36033- while (i < newvol) {
36034- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36035- i++;
36036- }
36037+ while (i > newvol) {
36038+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36039+ i--;
36040 }
36041+ while (i < newvol) {
36042+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36043+ i++;
36044+ }
36045+}
36046
36047- void issue_brightnesschange(const unsigned int oldbrt,
36048- const unsigned int newbrt)
36049- {
36050- unsigned int i = oldbrt;
36051+static void issue_brightnesschange(const unsigned int oldbrt,
36052+ const unsigned int newbrt,
36053+ const u32 event_mask)
36054+{
36055+ unsigned int i = oldbrt;
36056
36057- while (i > newbrt) {
36058- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36059- i--;
36060- }
36061- while (i < newbrt) {
36062- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36063- i++;
36064- }
36065+ while (i > newbrt) {
36066+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36067+ i--;
36068+ }
36069+ while (i < newbrt) {
36070+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36071+ i++;
36072 }
36073+}
36074
36075+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36076+ struct tp_nvram_state *newn,
36077+ const u32 event_mask)
36078+{
36079 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
36080 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
36081 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
36082@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36083 oldn->volume_level != newn->volume_level) {
36084 /* recently muted, or repeated mute keypress, or
36085 * multiple presses ending in mute */
36086- issue_volchange(oldn->volume_level, newn->volume_level);
36087+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36088 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
36089 }
36090 } else {
36091@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36092 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36093 }
36094 if (oldn->volume_level != newn->volume_level) {
36095- issue_volchange(oldn->volume_level, newn->volume_level);
36096+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36097 } else if (oldn->volume_toggle != newn->volume_toggle) {
36098 /* repeated vol up/down keypress at end of scale ? */
36099 if (newn->volume_level == 0)
36100@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36101 /* handle brightness */
36102 if (oldn->brightness_level != newn->brightness_level) {
36103 issue_brightnesschange(oldn->brightness_level,
36104- newn->brightness_level);
36105+ newn->brightness_level,
36106+ event_mask);
36107 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
36108 /* repeated key presses that didn't change state */
36109 if (newn->brightness_level == 0)
36110@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36111 && !tp_features.bright_unkfw)
36112 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36113 }
36114+}
36115
36116 #undef TPACPI_COMPARE_KEY
36117 #undef TPACPI_MAY_SEND_KEY
36118-}
36119
36120 /*
36121 * Polling driver
36122diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
36123index 769d265..a3a05ca 100644
36124--- a/drivers/pnp/pnpbios/bioscalls.c
36125+++ b/drivers/pnp/pnpbios/bioscalls.c
36126@@ -58,7 +58,7 @@ do { \
36127 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36128 } while(0)
36129
36130-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36131+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36132 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36133
36134 /*
36135@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36136
36137 cpu = get_cpu();
36138 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36139+
36140+ pax_open_kernel();
36141 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36142+ pax_close_kernel();
36143
36144 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36145 spin_lock_irqsave(&pnp_bios_lock, flags);
36146@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36147 :"memory");
36148 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36149
36150+ pax_open_kernel();
36151 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36152+ pax_close_kernel();
36153+
36154 put_cpu();
36155
36156 /* If we get here and this is set then the PnP BIOS faulted on us. */
36157@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
36158 return status;
36159 }
36160
36161-void pnpbios_calls_init(union pnp_bios_install_struct *header)
36162+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36163 {
36164 int i;
36165
36166@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36167 pnp_bios_callpoint.offset = header->fields.pm16offset;
36168 pnp_bios_callpoint.segment = PNP_CS16;
36169
36170+ pax_open_kernel();
36171+
36172 for_each_possible_cpu(i) {
36173 struct desc_struct *gdt = get_cpu_gdt_table(i);
36174 if (!gdt)
36175@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36176 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36177 (unsigned long)__va(header->fields.pm16dseg));
36178 }
36179+
36180+ pax_close_kernel();
36181 }
36182diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
36183index b0ecacb..7c9da2e 100644
36184--- a/drivers/pnp/resource.c
36185+++ b/drivers/pnp/resource.c
36186@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
36187 return 1;
36188
36189 /* check if the resource is valid */
36190- if (*irq < 0 || *irq > 15)
36191+ if (*irq > 15)
36192 return 0;
36193
36194 /* check if the resource is reserved */
36195@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
36196 return 1;
36197
36198 /* check if the resource is valid */
36199- if (*dma < 0 || *dma == 4 || *dma > 7)
36200+ if (*dma == 4 || *dma > 7)
36201 return 0;
36202
36203 /* check if the resource is reserved */
36204diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
36205index f5d6d37..739f6a9 100644
36206--- a/drivers/power/bq27x00_battery.c
36207+++ b/drivers/power/bq27x00_battery.c
36208@@ -72,7 +72,7 @@
36209 struct bq27x00_device_info;
36210 struct bq27x00_access_methods {
36211 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
36212-};
36213+} __no_const;
36214
36215 enum bq27x00_chip { BQ27000, BQ27500 };
36216
36217diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
36218index 8d53174..04c65de 100644
36219--- a/drivers/regulator/max8660.c
36220+++ b/drivers/regulator/max8660.c
36221@@ -333,8 +333,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
36222 max8660->shadow_regs[MAX8660_OVER1] = 5;
36223 } else {
36224 /* Otherwise devices can be toggled via software */
36225- max8660_dcdc_ops.enable = max8660_dcdc_enable;
36226- max8660_dcdc_ops.disable = max8660_dcdc_disable;
36227+ pax_open_kernel();
36228+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
36229+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
36230+ pax_close_kernel();
36231 }
36232
36233 /*
36234diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
36235index 970a233..ee1f241 100644
36236--- a/drivers/regulator/mc13892-regulator.c
36237+++ b/drivers/regulator/mc13892-regulator.c
36238@@ -566,10 +566,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
36239 }
36240 mc13xxx_unlock(mc13892);
36241
36242- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36243+ pax_open_kernel();
36244+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36245 = mc13892_vcam_set_mode;
36246- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36247+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36248 = mc13892_vcam_get_mode;
36249+ pax_close_kernel();
36250
36251 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
36252 ARRAY_SIZE(mc13892_regulators));
36253diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
36254index cace6d3..f623fda 100644
36255--- a/drivers/rtc/rtc-dev.c
36256+++ b/drivers/rtc/rtc-dev.c
36257@@ -14,6 +14,7 @@
36258 #include <linux/module.h>
36259 #include <linux/rtc.h>
36260 #include <linux/sched.h>
36261+#include <linux/grsecurity.h>
36262 #include "rtc-core.h"
36263
36264 static dev_t rtc_devt;
36265@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
36266 if (copy_from_user(&tm, uarg, sizeof(tm)))
36267 return -EFAULT;
36268
36269+ gr_log_timechange();
36270+
36271 return rtc_set_time(rtc, &tm);
36272
36273 case RTC_PIE_ON:
36274diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
36275index 3fcf627..f334910 100644
36276--- a/drivers/scsi/aacraid/aacraid.h
36277+++ b/drivers/scsi/aacraid/aacraid.h
36278@@ -492,7 +492,7 @@ struct adapter_ops
36279 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36280 /* Administrative operations */
36281 int (*adapter_comm)(struct aac_dev * dev, int comm);
36282-};
36283+} __no_const;
36284
36285 /*
36286 * Define which interrupt handler needs to be installed
36287diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
36288index 0d279c44..3d25a97 100644
36289--- a/drivers/scsi/aacraid/linit.c
36290+++ b/drivers/scsi/aacraid/linit.c
36291@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
36292 #elif defined(__devinitconst)
36293 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36294 #else
36295-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
36296+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36297 #endif
36298 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
36299 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
36300diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
36301index ff80552..1c4120c 100644
36302--- a/drivers/scsi/aic94xx/aic94xx_init.c
36303+++ b/drivers/scsi/aic94xx/aic94xx_init.c
36304@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
36305 .lldd_ata_set_dmamode = asd_set_dmamode,
36306 };
36307
36308-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
36309+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
36310 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
36311 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
36312 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
36313diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
36314index 4ad7e36..d004679 100644
36315--- a/drivers/scsi/bfa/bfa.h
36316+++ b/drivers/scsi/bfa/bfa.h
36317@@ -196,7 +196,7 @@ struct bfa_hwif_s {
36318 u32 *end);
36319 int cpe_vec_q0;
36320 int rme_vec_q0;
36321-};
36322+} __no_const;
36323 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36324
36325 struct bfa_faa_cbfn_s {
36326diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
36327index f0f80e2..8ec946b 100644
36328--- a/drivers/scsi/bfa/bfa_fcpim.c
36329+++ b/drivers/scsi/bfa/bfa_fcpim.c
36330@@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
36331
36332 bfa_iotag_attach(fcp);
36333
36334- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36335+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36336 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36337 (fcp->num_itns * sizeof(struct bfa_itn_s));
36338 memset(fcp->itn_arr, 0,
36339@@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36340 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36341 {
36342 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36343- struct bfa_itn_s *itn;
36344+ bfa_itn_s_no_const *itn;
36345
36346 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36347 itn->isr = isr;
36348diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36349index 36f26da..38a34a8 100644
36350--- a/drivers/scsi/bfa/bfa_fcpim.h
36351+++ b/drivers/scsi/bfa/bfa_fcpim.h
36352@@ -37,6 +37,7 @@ struct bfa_iotag_s {
36353 struct bfa_itn_s {
36354 bfa_isr_func_t isr;
36355 };
36356+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36357
36358 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36359 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36360@@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36361 struct list_head iotag_tio_free_q; /* free IO resources */
36362 struct list_head iotag_unused_q; /* unused IO resources*/
36363 struct bfa_iotag_s *iotag_arr;
36364- struct bfa_itn_s *itn_arr;
36365+ bfa_itn_s_no_const *itn_arr;
36366 int num_ioim_reqs;
36367 int num_fwtio_reqs;
36368 int num_itns;
36369diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36370index 1a99d4b..e85d64b 100644
36371--- a/drivers/scsi/bfa/bfa_ioc.h
36372+++ b/drivers/scsi/bfa/bfa_ioc.h
36373@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36374 bfa_ioc_disable_cbfn_t disable_cbfn;
36375 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36376 bfa_ioc_reset_cbfn_t reset_cbfn;
36377-};
36378+} __no_const;
36379
36380 /*
36381 * IOC event notification mechanism.
36382@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36383 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36384 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36385 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36386-};
36387+} __no_const;
36388
36389 /*
36390 * Queue element to wait for room in request queue. FIFO order is
36391diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36392index b48c24f..dac0fbc 100644
36393--- a/drivers/scsi/hosts.c
36394+++ b/drivers/scsi/hosts.c
36395@@ -42,7 +42,7 @@
36396 #include "scsi_logging.h"
36397
36398
36399-static atomic_t scsi_host_next_hn; /* host_no for next new host */
36400+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36401
36402
36403 static void scsi_host_cls_release(struct device *dev)
36404@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36405 * subtract one because we increment first then return, but we need to
36406 * know what the next host number was before increment
36407 */
36408- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36409+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36410 shost->dma_channel = 0xff;
36411
36412 /* These three are default values which can be overridden */
36413diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36414index 796482b..d08435c 100644
36415--- a/drivers/scsi/hpsa.c
36416+++ b/drivers/scsi/hpsa.c
36417@@ -536,7 +536,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
36418 unsigned long flags;
36419
36420 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36421- return h->access.command_completed(h, q);
36422+ return h->access->command_completed(h, q);
36423
36424 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
36425 a = rq->head[rq->current_entry];
36426@@ -3354,7 +3354,7 @@ static void start_io(struct ctlr_info *h)
36427 while (!list_empty(&h->reqQ)) {
36428 c = list_entry(h->reqQ.next, struct CommandList, list);
36429 /* can't do anything if fifo is full */
36430- if ((h->access.fifo_full(h))) {
36431+ if ((h->access->fifo_full(h))) {
36432 dev_warn(&h->pdev->dev, "fifo full\n");
36433 break;
36434 }
36435@@ -3376,7 +3376,7 @@ static void start_io(struct ctlr_info *h)
36436
36437 /* Tell the controller execute command */
36438 spin_unlock_irqrestore(&h->lock, flags);
36439- h->access.submit_command(h, c);
36440+ h->access->submit_command(h, c);
36441 spin_lock_irqsave(&h->lock, flags);
36442 }
36443 spin_unlock_irqrestore(&h->lock, flags);
36444@@ -3384,17 +3384,17 @@ static void start_io(struct ctlr_info *h)
36445
36446 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
36447 {
36448- return h->access.command_completed(h, q);
36449+ return h->access->command_completed(h, q);
36450 }
36451
36452 static inline bool interrupt_pending(struct ctlr_info *h)
36453 {
36454- return h->access.intr_pending(h);
36455+ return h->access->intr_pending(h);
36456 }
36457
36458 static inline long interrupt_not_for_us(struct ctlr_info *h)
36459 {
36460- return (h->access.intr_pending(h) == 0) ||
36461+ return (h->access->intr_pending(h) == 0) ||
36462 (h->interrupts_enabled == 0);
36463 }
36464
36465@@ -4298,7 +4298,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36466 if (prod_index < 0)
36467 return -ENODEV;
36468 h->product_name = products[prod_index].product_name;
36469- h->access = *(products[prod_index].access);
36470+ h->access = products[prod_index].access;
36471
36472 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
36473 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
36474@@ -4580,7 +4580,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
36475
36476 assert_spin_locked(&lockup_detector_lock);
36477 remove_ctlr_from_lockup_detector_list(h);
36478- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36479+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36480 spin_lock_irqsave(&h->lock, flags);
36481 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36482 spin_unlock_irqrestore(&h->lock, flags);
36483@@ -4758,7 +4758,7 @@ reinit_after_soft_reset:
36484 }
36485
36486 /* make sure the board interrupts are off */
36487- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36488+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36489
36490 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36491 goto clean2;
36492@@ -4792,7 +4792,7 @@ reinit_after_soft_reset:
36493 * fake ones to scoop up any residual completions.
36494 */
36495 spin_lock_irqsave(&h->lock, flags);
36496- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36497+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36498 spin_unlock_irqrestore(&h->lock, flags);
36499 free_irqs(h);
36500 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36501@@ -4811,9 +4811,9 @@ reinit_after_soft_reset:
36502 dev_info(&h->pdev->dev, "Board READY.\n");
36503 dev_info(&h->pdev->dev,
36504 "Waiting for stale completions to drain.\n");
36505- h->access.set_intr_mask(h, HPSA_INTR_ON);
36506+ h->access->set_intr_mask(h, HPSA_INTR_ON);
36507 msleep(10000);
36508- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36509+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36510
36511 rc = controller_reset_failed(h->cfgtable);
36512 if (rc)
36513@@ -4834,7 +4834,7 @@ reinit_after_soft_reset:
36514 }
36515
36516 /* Turn the interrupts on so we can service requests */
36517- h->access.set_intr_mask(h, HPSA_INTR_ON);
36518+ h->access->set_intr_mask(h, HPSA_INTR_ON);
36519
36520 hpsa_hba_inquiry(h);
36521 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36522@@ -4886,7 +4886,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36523 * To write all data in the battery backed cache to disks
36524 */
36525 hpsa_flush_cache(h);
36526- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36527+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36528 hpsa_free_irqs_and_disable_msix(h);
36529 }
36530
36531@@ -5055,7 +5055,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36532 return;
36533 }
36534 /* Change the access methods to the performant access methods */
36535- h->access = SA5_performant_access;
36536+ h->access = &SA5_performant_access;
36537 h->transMethod = CFGTBL_Trans_Performant;
36538 }
36539
36540diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36541index 9816479..c5d4e97 100644
36542--- a/drivers/scsi/hpsa.h
36543+++ b/drivers/scsi/hpsa.h
36544@@ -79,7 +79,7 @@ struct ctlr_info {
36545 unsigned int msix_vector;
36546 unsigned int msi_vector;
36547 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36548- struct access_method access;
36549+ struct access_method *access;
36550
36551 /* queue and queue Info */
36552 struct list_head reqQ;
36553diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36554index f2df059..a3a9930 100644
36555--- a/drivers/scsi/ips.h
36556+++ b/drivers/scsi/ips.h
36557@@ -1027,7 +1027,7 @@ typedef struct {
36558 int (*intr)(struct ips_ha *);
36559 void (*enableint)(struct ips_ha *);
36560 uint32_t (*statupd)(struct ips_ha *);
36561-} ips_hw_func_t;
36562+} __no_const ips_hw_func_t;
36563
36564 typedef struct ips_ha {
36565 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36566diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36567index aceffad..c35c08d 100644
36568--- a/drivers/scsi/libfc/fc_exch.c
36569+++ b/drivers/scsi/libfc/fc_exch.c
36570@@ -105,12 +105,12 @@ struct fc_exch_mgr {
36571 * all together if not used XXX
36572 */
36573 struct {
36574- atomic_t no_free_exch;
36575- atomic_t no_free_exch_xid;
36576- atomic_t xid_not_found;
36577- atomic_t xid_busy;
36578- atomic_t seq_not_found;
36579- atomic_t non_bls_resp;
36580+ atomic_unchecked_t no_free_exch;
36581+ atomic_unchecked_t no_free_exch_xid;
36582+ atomic_unchecked_t xid_not_found;
36583+ atomic_unchecked_t xid_busy;
36584+ atomic_unchecked_t seq_not_found;
36585+ atomic_unchecked_t non_bls_resp;
36586 } stats;
36587 };
36588
36589@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36590 /* allocate memory for exchange */
36591 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36592 if (!ep) {
36593- atomic_inc(&mp->stats.no_free_exch);
36594+ atomic_inc_unchecked(&mp->stats.no_free_exch);
36595 goto out;
36596 }
36597 memset(ep, 0, sizeof(*ep));
36598@@ -780,7 +780,7 @@ out:
36599 return ep;
36600 err:
36601 spin_unlock_bh(&pool->lock);
36602- atomic_inc(&mp->stats.no_free_exch_xid);
36603+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36604 mempool_free(ep, mp->ep_pool);
36605 return NULL;
36606 }
36607@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36608 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36609 ep = fc_exch_find(mp, xid);
36610 if (!ep) {
36611- atomic_inc(&mp->stats.xid_not_found);
36612+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36613 reject = FC_RJT_OX_ID;
36614 goto out;
36615 }
36616@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36617 ep = fc_exch_find(mp, xid);
36618 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36619 if (ep) {
36620- atomic_inc(&mp->stats.xid_busy);
36621+ atomic_inc_unchecked(&mp->stats.xid_busy);
36622 reject = FC_RJT_RX_ID;
36623 goto rel;
36624 }
36625@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36626 }
36627 xid = ep->xid; /* get our XID */
36628 } else if (!ep) {
36629- atomic_inc(&mp->stats.xid_not_found);
36630+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36631 reject = FC_RJT_RX_ID; /* XID not found */
36632 goto out;
36633 }
36634@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36635 } else {
36636 sp = &ep->seq;
36637 if (sp->id != fh->fh_seq_id) {
36638- atomic_inc(&mp->stats.seq_not_found);
36639+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36640 if (f_ctl & FC_FC_END_SEQ) {
36641 /*
36642 * Update sequence_id based on incoming last
36643@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36644
36645 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36646 if (!ep) {
36647- atomic_inc(&mp->stats.xid_not_found);
36648+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36649 goto out;
36650 }
36651 if (ep->esb_stat & ESB_ST_COMPLETE) {
36652- atomic_inc(&mp->stats.xid_not_found);
36653+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36654 goto rel;
36655 }
36656 if (ep->rxid == FC_XID_UNKNOWN)
36657 ep->rxid = ntohs(fh->fh_rx_id);
36658 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36659- atomic_inc(&mp->stats.xid_not_found);
36660+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36661 goto rel;
36662 }
36663 if (ep->did != ntoh24(fh->fh_s_id) &&
36664 ep->did != FC_FID_FLOGI) {
36665- atomic_inc(&mp->stats.xid_not_found);
36666+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36667 goto rel;
36668 }
36669 sof = fr_sof(fp);
36670@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36671 sp->ssb_stat |= SSB_ST_RESP;
36672 sp->id = fh->fh_seq_id;
36673 } else if (sp->id != fh->fh_seq_id) {
36674- atomic_inc(&mp->stats.seq_not_found);
36675+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36676 goto rel;
36677 }
36678
36679@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36680 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36681
36682 if (!sp)
36683- atomic_inc(&mp->stats.xid_not_found);
36684+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36685 else
36686- atomic_inc(&mp->stats.non_bls_resp);
36687+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
36688
36689 fc_frame_free(fp);
36690 }
36691diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36692index d109cc3..09f4e7d 100644
36693--- a/drivers/scsi/libsas/sas_ata.c
36694+++ b/drivers/scsi/libsas/sas_ata.c
36695@@ -529,7 +529,7 @@ static struct ata_port_operations sas_sata_ops = {
36696 .postreset = ata_std_postreset,
36697 .error_handler = ata_std_error_handler,
36698 .post_internal_cmd = sas_ata_post_internal,
36699- .qc_defer = ata_std_qc_defer,
36700+ .qc_defer = ata_std_qc_defer,
36701 .qc_prep = ata_noop_qc_prep,
36702 .qc_issue = sas_ata_qc_issue,
36703 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36704diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
36705index fe5d396..e93d526 100644
36706--- a/drivers/scsi/lpfc/Makefile
36707+++ b/drivers/scsi/lpfc/Makefile
36708@@ -22,7 +22,7 @@
36709 ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
36710 ccflags-$(GCOV) += -O0
36711
36712-ccflags-y += -Werror
36713+#ccflags-y += -Werror
36714
36715 obj-$(CONFIG_SCSI_LPFC) := lpfc.o
36716
36717diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36718index e5da6da..c888d48 100644
36719--- a/drivers/scsi/lpfc/lpfc.h
36720+++ b/drivers/scsi/lpfc/lpfc.h
36721@@ -416,7 +416,7 @@ struct lpfc_vport {
36722 struct dentry *debug_nodelist;
36723 struct dentry *vport_debugfs_root;
36724 struct lpfc_debugfs_trc *disc_trc;
36725- atomic_t disc_trc_cnt;
36726+ atomic_unchecked_t disc_trc_cnt;
36727 #endif
36728 uint8_t stat_data_enabled;
36729 uint8_t stat_data_blocked;
36730@@ -830,8 +830,8 @@ struct lpfc_hba {
36731 struct timer_list fabric_block_timer;
36732 unsigned long bit_flags;
36733 #define FABRIC_COMANDS_BLOCKED 0
36734- atomic_t num_rsrc_err;
36735- atomic_t num_cmd_success;
36736+ atomic_unchecked_t num_rsrc_err;
36737+ atomic_unchecked_t num_cmd_success;
36738 unsigned long last_rsrc_error_time;
36739 unsigned long last_ramp_down_time;
36740 unsigned long last_ramp_up_time;
36741@@ -867,7 +867,7 @@ struct lpfc_hba {
36742
36743 struct dentry *debug_slow_ring_trc;
36744 struct lpfc_debugfs_trc *slow_ring_trc;
36745- atomic_t slow_ring_trc_cnt;
36746+ atomic_unchecked_t slow_ring_trc_cnt;
36747 /* iDiag debugfs sub-directory */
36748 struct dentry *idiag_root;
36749 struct dentry *idiag_pci_cfg;
36750diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36751index 3217d63..c417981 100644
36752--- a/drivers/scsi/lpfc/lpfc_debugfs.c
36753+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36754@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36755
36756 #include <linux/debugfs.h>
36757
36758-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36759+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36760 static unsigned long lpfc_debugfs_start_time = 0L;
36761
36762 /* iDiag */
36763@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36764 lpfc_debugfs_enable = 0;
36765
36766 len = 0;
36767- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36768+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36769 (lpfc_debugfs_max_disc_trc - 1);
36770 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36771 dtp = vport->disc_trc + i;
36772@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36773 lpfc_debugfs_enable = 0;
36774
36775 len = 0;
36776- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36777+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36778 (lpfc_debugfs_max_slow_ring_trc - 1);
36779 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36780 dtp = phba->slow_ring_trc + i;
36781@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36782 !vport || !vport->disc_trc)
36783 return;
36784
36785- index = atomic_inc_return(&vport->disc_trc_cnt) &
36786+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36787 (lpfc_debugfs_max_disc_trc - 1);
36788 dtp = vport->disc_trc + index;
36789 dtp->fmt = fmt;
36790 dtp->data1 = data1;
36791 dtp->data2 = data2;
36792 dtp->data3 = data3;
36793- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36794+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36795 dtp->jif = jiffies;
36796 #endif
36797 return;
36798@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36799 !phba || !phba->slow_ring_trc)
36800 return;
36801
36802- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36803+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36804 (lpfc_debugfs_max_slow_ring_trc - 1);
36805 dtp = phba->slow_ring_trc + index;
36806 dtp->fmt = fmt;
36807 dtp->data1 = data1;
36808 dtp->data2 = data2;
36809 dtp->data3 = data3;
36810- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36811+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36812 dtp->jif = jiffies;
36813 #endif
36814 return;
36815@@ -4090,7 +4090,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36816 "slow_ring buffer\n");
36817 goto debug_failed;
36818 }
36819- atomic_set(&phba->slow_ring_trc_cnt, 0);
36820+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36821 memset(phba->slow_ring_trc, 0,
36822 (sizeof(struct lpfc_debugfs_trc) *
36823 lpfc_debugfs_max_slow_ring_trc));
36824@@ -4136,7 +4136,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36825 "buffer\n");
36826 goto debug_failed;
36827 }
36828- atomic_set(&vport->disc_trc_cnt, 0);
36829+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36830
36831 snprintf(name, sizeof(name), "discovery_trace");
36832 vport->debug_disc_trc =
36833diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36834index 411ed48..967f553 100644
36835--- a/drivers/scsi/lpfc/lpfc_init.c
36836+++ b/drivers/scsi/lpfc/lpfc_init.c
36837@@ -10341,8 +10341,10 @@ lpfc_init(void)
36838 "misc_register returned with status %d", error);
36839
36840 if (lpfc_enable_npiv) {
36841- lpfc_transport_functions.vport_create = lpfc_vport_create;
36842- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36843+ pax_open_kernel();
36844+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36845+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36846+ pax_close_kernel();
36847 }
36848 lpfc_transport_template =
36849 fc_attach_transport(&lpfc_transport_functions);
36850diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36851index 66e0906..1620281 100644
36852--- a/drivers/scsi/lpfc/lpfc_scsi.c
36853+++ b/drivers/scsi/lpfc/lpfc_scsi.c
36854@@ -311,7 +311,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36855 uint32_t evt_posted;
36856
36857 spin_lock_irqsave(&phba->hbalock, flags);
36858- atomic_inc(&phba->num_rsrc_err);
36859+ atomic_inc_unchecked(&phba->num_rsrc_err);
36860 phba->last_rsrc_error_time = jiffies;
36861
36862 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36863@@ -352,7 +352,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36864 unsigned long flags;
36865 struct lpfc_hba *phba = vport->phba;
36866 uint32_t evt_posted;
36867- atomic_inc(&phba->num_cmd_success);
36868+ atomic_inc_unchecked(&phba->num_cmd_success);
36869
36870 if (vport->cfg_lun_queue_depth <= queue_depth)
36871 return;
36872@@ -396,8 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36873 unsigned long num_rsrc_err, num_cmd_success;
36874 int i;
36875
36876- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36877- num_cmd_success = atomic_read(&phba->num_cmd_success);
36878+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36879+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36880
36881 /*
36882 * The error and success command counters are global per
36883@@ -425,8 +425,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36884 }
36885 }
36886 lpfc_destroy_vport_work_array(phba, vports);
36887- atomic_set(&phba->num_rsrc_err, 0);
36888- atomic_set(&phba->num_cmd_success, 0);
36889+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36890+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36891 }
36892
36893 /**
36894@@ -460,8 +460,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36895 }
36896 }
36897 lpfc_destroy_vport_work_array(phba, vports);
36898- atomic_set(&phba->num_rsrc_err, 0);
36899- atomic_set(&phba->num_cmd_success, 0);
36900+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36901+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36902 }
36903
36904 /**
36905diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36906index ea8a0b4..812a124 100644
36907--- a/drivers/scsi/pmcraid.c
36908+++ b/drivers/scsi/pmcraid.c
36909@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36910 res->scsi_dev = scsi_dev;
36911 scsi_dev->hostdata = res;
36912 res->change_detected = 0;
36913- atomic_set(&res->read_failures, 0);
36914- atomic_set(&res->write_failures, 0);
36915+ atomic_set_unchecked(&res->read_failures, 0);
36916+ atomic_set_unchecked(&res->write_failures, 0);
36917 rc = 0;
36918 }
36919 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36920@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36921
36922 /* If this was a SCSI read/write command keep count of errors */
36923 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36924- atomic_inc(&res->read_failures);
36925+ atomic_inc_unchecked(&res->read_failures);
36926 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36927- atomic_inc(&res->write_failures);
36928+ atomic_inc_unchecked(&res->write_failures);
36929
36930 if (!RES_IS_GSCSI(res->cfg_entry) &&
36931 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36932@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
36933 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36934 * hrrq_id assigned here in queuecommand
36935 */
36936- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36937+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36938 pinstance->num_hrrq;
36939 cmd->cmd_done = pmcraid_io_done;
36940
36941@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
36942 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36943 * hrrq_id assigned here in queuecommand
36944 */
36945- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36946+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36947 pinstance->num_hrrq;
36948
36949 if (request_size) {
36950@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36951
36952 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36953 /* add resources only after host is added into system */
36954- if (!atomic_read(&pinstance->expose_resources))
36955+ if (!atomic_read_unchecked(&pinstance->expose_resources))
36956 return;
36957
36958 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36959@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
36960 init_waitqueue_head(&pinstance->reset_wait_q);
36961
36962 atomic_set(&pinstance->outstanding_cmds, 0);
36963- atomic_set(&pinstance->last_message_id, 0);
36964- atomic_set(&pinstance->expose_resources, 0);
36965+ atomic_set_unchecked(&pinstance->last_message_id, 0);
36966+ atomic_set_unchecked(&pinstance->expose_resources, 0);
36967
36968 INIT_LIST_HEAD(&pinstance->free_res_q);
36969 INIT_LIST_HEAD(&pinstance->used_res_q);
36970@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
36971 /* Schedule worker thread to handle CCN and take care of adding and
36972 * removing devices to OS
36973 */
36974- atomic_set(&pinstance->expose_resources, 1);
36975+ atomic_set_unchecked(&pinstance->expose_resources, 1);
36976 schedule_work(&pinstance->worker_q);
36977 return rc;
36978
36979diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36980index e1d150f..6c6df44 100644
36981--- a/drivers/scsi/pmcraid.h
36982+++ b/drivers/scsi/pmcraid.h
36983@@ -748,7 +748,7 @@ struct pmcraid_instance {
36984 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36985
36986 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
36987- atomic_t last_message_id;
36988+ atomic_unchecked_t last_message_id;
36989
36990 /* configuration table */
36991 struct pmcraid_config_table *cfg_table;
36992@@ -777,7 +777,7 @@ struct pmcraid_instance {
36993 atomic_t outstanding_cmds;
36994
36995 /* should add/delete resources to mid-layer now ?*/
36996- atomic_t expose_resources;
36997+ atomic_unchecked_t expose_resources;
36998
36999
37000
37001@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
37002 struct pmcraid_config_table_entry_ext cfg_entry_ext;
37003 };
37004 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37005- atomic_t read_failures; /* count of failed READ commands */
37006- atomic_t write_failures; /* count of failed WRITE commands */
37007+ atomic_unchecked_t read_failures; /* count of failed READ commands */
37008+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37009
37010 /* To indicate add/delete/modify during CCN */
37011 u8 change_detected;
37012diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
37013index 5ab9530..2dd80f7 100644
37014--- a/drivers/scsi/qla2xxx/qla_attr.c
37015+++ b/drivers/scsi/qla2xxx/qla_attr.c
37016@@ -1855,7 +1855,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
37017 return 0;
37018 }
37019
37020-struct fc_function_template qla2xxx_transport_functions = {
37021+fc_function_template_no_const qla2xxx_transport_functions = {
37022
37023 .show_host_node_name = 1,
37024 .show_host_port_name = 1,
37025@@ -1902,7 +1902,7 @@ struct fc_function_template qla2xxx_transport_functions = {
37026 .bsg_timeout = qla24xx_bsg_timeout,
37027 };
37028
37029-struct fc_function_template qla2xxx_transport_vport_functions = {
37030+fc_function_template_no_const qla2xxx_transport_vport_functions = {
37031
37032 .show_host_node_name = 1,
37033 .show_host_port_name = 1,
37034diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
37035index 39007f5..7fafc64 100644
37036--- a/drivers/scsi/qla2xxx/qla_def.h
37037+++ b/drivers/scsi/qla2xxx/qla_def.h
37038@@ -2284,7 +2284,7 @@ struct isp_operations {
37039 int (*start_scsi) (srb_t *);
37040 int (*abort_isp) (struct scsi_qla_host *);
37041 int (*iospace_config)(struct qla_hw_data*);
37042-};
37043+} __no_const;
37044
37045 /* MSI-X Support *************************************************************/
37046
37047diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
37048index 9eacd2d..d79629c 100644
37049--- a/drivers/scsi/qla2xxx/qla_gbl.h
37050+++ b/drivers/scsi/qla2xxx/qla_gbl.h
37051@@ -484,8 +484,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
37052 struct device_attribute;
37053 extern struct device_attribute *qla2x00_host_attrs[];
37054 struct fc_function_template;
37055-extern struct fc_function_template qla2xxx_transport_functions;
37056-extern struct fc_function_template qla2xxx_transport_vport_functions;
37057+extern fc_function_template_no_const qla2xxx_transport_functions;
37058+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
37059 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
37060 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
37061 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
37062diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
37063index 96a5616..eeb185a 100644
37064--- a/drivers/scsi/qla4xxx/ql4_def.h
37065+++ b/drivers/scsi/qla4xxx/ql4_def.h
37066@@ -268,7 +268,7 @@ struct ddb_entry {
37067 * (4000 only) */
37068 atomic_t relogin_timer; /* Max Time to wait for
37069 * relogin to complete */
37070- atomic_t relogin_retry_count; /* Num of times relogin has been
37071+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37072 * retried */
37073 uint32_t default_time2wait; /* Default Min time between
37074 * relogins (+aens) */
37075diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
37076index cd15678..f7e6846 100644
37077--- a/drivers/scsi/qla4xxx/ql4_os.c
37078+++ b/drivers/scsi/qla4xxx/ql4_os.c
37079@@ -2615,12 +2615,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
37080 */
37081 if (!iscsi_is_session_online(cls_sess)) {
37082 /* Reset retry relogin timer */
37083- atomic_inc(&ddb_entry->relogin_retry_count);
37084+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37085 DEBUG2(ql4_printk(KERN_INFO, ha,
37086 "%s: index[%d] relogin timed out-retrying"
37087 " relogin (%d), retry (%d)\n", __func__,
37088 ddb_entry->fw_ddb_index,
37089- atomic_read(&ddb_entry->relogin_retry_count),
37090+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
37091 ddb_entry->default_time2wait + 4));
37092 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
37093 atomic_set(&ddb_entry->retry_relogin_timer,
37094@@ -4517,7 +4517,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
37095
37096 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37097 atomic_set(&ddb_entry->relogin_timer, 0);
37098- atomic_set(&ddb_entry->relogin_retry_count, 0);
37099+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37100 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
37101 ddb_entry->default_relogin_timeout =
37102 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
37103diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
37104index bbbc9c9..ce22f77 100644
37105--- a/drivers/scsi/scsi.c
37106+++ b/drivers/scsi/scsi.c
37107@@ -659,7 +659,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
37108 unsigned long timeout;
37109 int rtn = 0;
37110
37111- atomic_inc(&cmd->device->iorequest_cnt);
37112+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37113
37114 /* check if the device is still usable */
37115 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37116diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
37117index dae3873..bb4bee6 100644
37118--- a/drivers/scsi/scsi_lib.c
37119+++ b/drivers/scsi/scsi_lib.c
37120@@ -1425,7 +1425,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
37121 shost = sdev->host;
37122 scsi_init_cmd_errh(cmd);
37123 cmd->result = DID_NO_CONNECT << 16;
37124- atomic_inc(&cmd->device->iorequest_cnt);
37125+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37126
37127 /*
37128 * SCSI request completion path will do scsi_device_unbusy(),
37129@@ -1451,9 +1451,9 @@ static void scsi_softirq_done(struct request *rq)
37130
37131 INIT_LIST_HEAD(&cmd->eh_entry);
37132
37133- atomic_inc(&cmd->device->iodone_cnt);
37134+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
37135 if (cmd->result)
37136- atomic_inc(&cmd->device->ioerr_cnt);
37137+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37138
37139 disposition = scsi_decide_disposition(cmd);
37140 if (disposition != SUCCESS &&
37141diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
37142index bb7c482..7551a95 100644
37143--- a/drivers/scsi/scsi_sysfs.c
37144+++ b/drivers/scsi/scsi_sysfs.c
37145@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
37146 char *buf) \
37147 { \
37148 struct scsi_device *sdev = to_scsi_device(dev); \
37149- unsigned long long count = atomic_read(&sdev->field); \
37150+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
37151 return snprintf(buf, 20, "0x%llx\n", count); \
37152 } \
37153 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37154diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
37155index 84a1fdf..693b0d6 100644
37156--- a/drivers/scsi/scsi_tgt_lib.c
37157+++ b/drivers/scsi/scsi_tgt_lib.c
37158@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
37159 int err;
37160
37161 dprintk("%lx %u\n", uaddr, len);
37162- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
37163+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
37164 if (err) {
37165 /*
37166 * TODO: need to fixup sg_tablesize, max_segment_size,
37167diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
37168index 5797604..289a5b5 100644
37169--- a/drivers/scsi/scsi_transport_fc.c
37170+++ b/drivers/scsi/scsi_transport_fc.c
37171@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
37172 * Netlink Infrastructure
37173 */
37174
37175-static atomic_t fc_event_seq;
37176+static atomic_unchecked_t fc_event_seq;
37177
37178 /**
37179 * fc_get_event_number - Obtain the next sequential FC event number
37180@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
37181 u32
37182 fc_get_event_number(void)
37183 {
37184- return atomic_add_return(1, &fc_event_seq);
37185+ return atomic_add_return_unchecked(1, &fc_event_seq);
37186 }
37187 EXPORT_SYMBOL(fc_get_event_number);
37188
37189@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
37190 {
37191 int error;
37192
37193- atomic_set(&fc_event_seq, 0);
37194+ atomic_set_unchecked(&fc_event_seq, 0);
37195
37196 error = transport_class_register(&fc_host_class);
37197 if (error)
37198@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
37199 char *cp;
37200
37201 *val = simple_strtoul(buf, &cp, 0);
37202- if ((*cp && (*cp != '\n')) || (*val < 0))
37203+ if (*cp && (*cp != '\n'))
37204 return -EINVAL;
37205 /*
37206 * Check for overflow; dev_loss_tmo is u32
37207diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
37208index 1cf640e..78e9014 100644
37209--- a/drivers/scsi/scsi_transport_iscsi.c
37210+++ b/drivers/scsi/scsi_transport_iscsi.c
37211@@ -79,7 +79,7 @@ struct iscsi_internal {
37212 struct transport_container session_cont;
37213 };
37214
37215-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37216+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37217 static struct workqueue_struct *iscsi_eh_timer_workq;
37218
37219 static DEFINE_IDA(iscsi_sess_ida);
37220@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
37221 int err;
37222
37223 ihost = shost->shost_data;
37224- session->sid = atomic_add_return(1, &iscsi_session_nr);
37225+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37226
37227 if (target_id == ISCSI_MAX_TARGET) {
37228 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
37229@@ -2940,7 +2940,7 @@ static __init int iscsi_transport_init(void)
37230 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37231 ISCSI_TRANSPORT_VERSION);
37232
37233- atomic_set(&iscsi_session_nr, 0);
37234+ atomic_set_unchecked(&iscsi_session_nr, 0);
37235
37236 err = class_register(&iscsi_transport_class);
37237 if (err)
37238diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
37239index 21a045e..ec89e03 100644
37240--- a/drivers/scsi/scsi_transport_srp.c
37241+++ b/drivers/scsi/scsi_transport_srp.c
37242@@ -33,7 +33,7 @@
37243 #include "scsi_transport_srp_internal.h"
37244
37245 struct srp_host_attrs {
37246- atomic_t next_port_id;
37247+ atomic_unchecked_t next_port_id;
37248 };
37249 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37250
37251@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
37252 struct Scsi_Host *shost = dev_to_shost(dev);
37253 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37254
37255- atomic_set(&srp_host->next_port_id, 0);
37256+ atomic_set_unchecked(&srp_host->next_port_id, 0);
37257 return 0;
37258 }
37259
37260@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
37261 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37262 rport->roles = ids->roles;
37263
37264- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37265+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37266 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37267
37268 transport_setup_device(&rport->dev);
37269diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
37270index 9c5c5f2..8414557 100644
37271--- a/drivers/scsi/sg.c
37272+++ b/drivers/scsi/sg.c
37273@@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
37274 sdp->disk->disk_name,
37275 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
37276 NULL,
37277- (char *)arg);
37278+ (char __user *)arg);
37279 case BLKTRACESTART:
37280 return blk_trace_startstop(sdp->device->request_queue, 1);
37281 case BLKTRACESTOP:
37282diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
37283index 1041cb8..4a946fa 100644
37284--- a/drivers/spi/spi.c
37285+++ b/drivers/spi/spi.c
37286@@ -1453,7 +1453,7 @@ int spi_bus_unlock(struct spi_master *master)
37287 EXPORT_SYMBOL_GPL(spi_bus_unlock);
37288
37289 /* portable code must never pass more than 32 bytes */
37290-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37291+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
37292
37293 static u8 *buf;
37294
37295diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37296index 34afc16..ffe44dd 100644
37297--- a/drivers/staging/octeon/ethernet-rx.c
37298+++ b/drivers/staging/octeon/ethernet-rx.c
37299@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37300 /* Increment RX stats for virtual ports */
37301 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37302 #ifdef CONFIG_64BIT
37303- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37304- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37305+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37306+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37307 #else
37308- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37309- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37310+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37311+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37312 #endif
37313 }
37314 netif_receive_skb(skb);
37315@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37316 dev->name);
37317 */
37318 #ifdef CONFIG_64BIT
37319- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37320+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37321 #else
37322- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37323+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37324 #endif
37325 dev_kfree_skb_irq(skb);
37326 }
37327diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37328index 18f7a79..cc3bc24 100644
37329--- a/drivers/staging/octeon/ethernet.c
37330+++ b/drivers/staging/octeon/ethernet.c
37331@@ -259,11 +259,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37332 * since the RX tasklet also increments it.
37333 */
37334 #ifdef CONFIG_64BIT
37335- atomic64_add(rx_status.dropped_packets,
37336- (atomic64_t *)&priv->stats.rx_dropped);
37337+ atomic64_add_unchecked(rx_status.dropped_packets,
37338+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37339 #else
37340- atomic_add(rx_status.dropped_packets,
37341- (atomic_t *)&priv->stats.rx_dropped);
37342+ atomic_add_unchecked(rx_status.dropped_packets,
37343+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
37344 #endif
37345 }
37346
37347diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37348index dc23395..cf7e9b1 100644
37349--- a/drivers/staging/rtl8712/rtl871x_io.h
37350+++ b/drivers/staging/rtl8712/rtl871x_io.h
37351@@ -108,7 +108,7 @@ struct _io_ops {
37352 u8 *pmem);
37353 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37354 u8 *pmem);
37355-};
37356+} __no_const;
37357
37358 struct io_req {
37359 struct list_head list;
37360diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37361index c7b5e8b..783d6cb 100644
37362--- a/drivers/staging/sbe-2t3e3/netdev.c
37363+++ b/drivers/staging/sbe-2t3e3/netdev.c
37364@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37365 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37366
37367 if (rlen)
37368- if (copy_to_user(data, &resp, rlen))
37369+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37370 return -EFAULT;
37371
37372 return 0;
37373diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37374index 42cdafe..2769103 100644
37375--- a/drivers/staging/speakup/speakup_soft.c
37376+++ b/drivers/staging/speakup/speakup_soft.c
37377@@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37378 break;
37379 } else if (!initialized) {
37380 if (*init) {
37381- ch = *init;
37382 init++;
37383 } else {
37384 initialized = 1;
37385 }
37386+ ch = *init;
37387 } else {
37388 ch = synth_buffer_getc();
37389 }
37390diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37391index 5d89c0f..9261317 100644
37392--- a/drivers/staging/usbip/usbip_common.h
37393+++ b/drivers/staging/usbip/usbip_common.h
37394@@ -289,7 +289,7 @@ struct usbip_device {
37395 void (*shutdown)(struct usbip_device *);
37396 void (*reset)(struct usbip_device *);
37397 void (*unusable)(struct usbip_device *);
37398- } eh_ops;
37399+ } __no_const eh_ops;
37400 };
37401
37402 #define kthread_get_run(threadfn, data, namefmt, ...) \
37403diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37404index 88b3298..3783eee 100644
37405--- a/drivers/staging/usbip/vhci.h
37406+++ b/drivers/staging/usbip/vhci.h
37407@@ -88,7 +88,7 @@ struct vhci_hcd {
37408 unsigned resuming:1;
37409 unsigned long re_timeout;
37410
37411- atomic_t seqnum;
37412+ atomic_unchecked_t seqnum;
37413
37414 /*
37415 * NOTE:
37416diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37417index f708cba..2de6d72 100644
37418--- a/drivers/staging/usbip/vhci_hcd.c
37419+++ b/drivers/staging/usbip/vhci_hcd.c
37420@@ -488,7 +488,7 @@ static void vhci_tx_urb(struct urb *urb)
37421 return;
37422 }
37423
37424- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37425+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37426 if (priv->seqnum == 0xffff)
37427 dev_info(&urb->dev->dev, "seqnum max\n");
37428
37429@@ -740,7 +740,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37430 return -ENOMEM;
37431 }
37432
37433- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37434+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37435 if (unlink->seqnum == 0xffff)
37436 pr_info("seqnum max\n");
37437
37438@@ -928,7 +928,7 @@ static int vhci_start(struct usb_hcd *hcd)
37439 vdev->rhport = rhport;
37440 }
37441
37442- atomic_set(&vhci->seqnum, 0);
37443+ atomic_set_unchecked(&vhci->seqnum, 0);
37444 spin_lock_init(&vhci->lock);
37445
37446 hcd->power_budget = 0; /* no limit */
37447diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37448index f0eaf04..5a82e06 100644
37449--- a/drivers/staging/usbip/vhci_rx.c
37450+++ b/drivers/staging/usbip/vhci_rx.c
37451@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37452 if (!urb) {
37453 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37454 pr_info("max seqnum %d\n",
37455- atomic_read(&the_controller->seqnum));
37456+ atomic_read_unchecked(&the_controller->seqnum));
37457 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37458 return;
37459 }
37460diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37461index 7735027..30eed13 100644
37462--- a/drivers/staging/vt6655/hostap.c
37463+++ b/drivers/staging/vt6655/hostap.c
37464@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37465 *
37466 */
37467
37468+static net_device_ops_no_const apdev_netdev_ops;
37469+
37470 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37471 {
37472 PSDevice apdev_priv;
37473 struct net_device *dev = pDevice->dev;
37474 int ret;
37475- const struct net_device_ops apdev_netdev_ops = {
37476- .ndo_start_xmit = pDevice->tx_80211,
37477- };
37478
37479 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37480
37481@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37482 *apdev_priv = *pDevice;
37483 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37484
37485+ /* only half broken now */
37486+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37487 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37488
37489 pDevice->apdev->type = ARPHRD_IEEE80211;
37490diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37491index 51b5adf..098e320 100644
37492--- a/drivers/staging/vt6656/hostap.c
37493+++ b/drivers/staging/vt6656/hostap.c
37494@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37495 *
37496 */
37497
37498+static net_device_ops_no_const apdev_netdev_ops;
37499+
37500 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37501 {
37502 PSDevice apdev_priv;
37503 struct net_device *dev = pDevice->dev;
37504 int ret;
37505- const struct net_device_ops apdev_netdev_ops = {
37506- .ndo_start_xmit = pDevice->tx_80211,
37507- };
37508
37509 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37510
37511@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37512 *apdev_priv = *pDevice;
37513 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37514
37515+ /* only half broken now */
37516+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37517 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37518
37519 pDevice->apdev->type = ARPHRD_IEEE80211;
37520diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37521index 7843dfd..3db105f 100644
37522--- a/drivers/staging/wlan-ng/hfa384x_usb.c
37523+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37524@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37525
37526 struct usbctlx_completor {
37527 int (*complete) (struct usbctlx_completor *);
37528-};
37529+} __no_const;
37530
37531 static int
37532 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37533diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37534index 1ca66ea..76f1343 100644
37535--- a/drivers/staging/zcache/tmem.c
37536+++ b/drivers/staging/zcache/tmem.c
37537@@ -39,7 +39,7 @@
37538 * A tmem host implementation must use this function to register callbacks
37539 * for memory allocation.
37540 */
37541-static struct tmem_hostops tmem_hostops;
37542+static tmem_hostops_no_const tmem_hostops;
37543
37544 static void tmem_objnode_tree_init(void);
37545
37546@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37547 * A tmem host implementation must use this function to register
37548 * callbacks for a page-accessible memory (PAM) implementation
37549 */
37550-static struct tmem_pamops tmem_pamops;
37551+static tmem_pamops_no_const tmem_pamops;
37552
37553 void tmem_register_pamops(struct tmem_pamops *m)
37554 {
37555diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37556index 0d4aa82..f7832d4 100644
37557--- a/drivers/staging/zcache/tmem.h
37558+++ b/drivers/staging/zcache/tmem.h
37559@@ -180,6 +180,7 @@ struct tmem_pamops {
37560 void (*new_obj)(struct tmem_obj *);
37561 int (*replace_in_obj)(void *, struct tmem_obj *);
37562 };
37563+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37564 extern void tmem_register_pamops(struct tmem_pamops *m);
37565
37566 /* memory allocation methods provided by the host implementation */
37567@@ -189,6 +190,7 @@ struct tmem_hostops {
37568 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37569 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37570 };
37571+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37572 extern void tmem_register_hostops(struct tmem_hostops *m);
37573
37574 /* core tmem accessor functions */
37575diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37576index df9824c..f699b8a 100644
37577--- a/drivers/target/target_core_transport.c
37578+++ b/drivers/target/target_core_transport.c
37579@@ -1233,7 +1233,7 @@ struct se_device *transport_add_device_to_core_hba(
37580 spin_lock_init(&dev->se_port_lock);
37581 spin_lock_init(&dev->se_tmr_lock);
37582 spin_lock_init(&dev->qf_cmd_lock);
37583- atomic_set(&dev->dev_ordered_id, 0);
37584+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
37585
37586 se_dev_set_default_attribs(dev, dev_limits);
37587
37588@@ -1402,7 +1402,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37589 * Used to determine when ORDERED commands should go from
37590 * Dormant to Active status.
37591 */
37592- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37593+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37594 smp_mb__after_atomic_inc();
37595 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37596 cmd->se_ordered_id, cmd->sam_task_attr,
37597diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
37598index e61cabd..7617d26 100644
37599--- a/drivers/tty/cyclades.c
37600+++ b/drivers/tty/cyclades.c
37601@@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
37602 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
37603 info->port.count);
37604 #endif
37605- info->port.count++;
37606+ atomic_inc(&info->port.count);
37607 #ifdef CY_DEBUG_COUNT
37608 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
37609- current->pid, info->port.count);
37610+ current->pid, atomic_read(&info->port.count));
37611 #endif
37612
37613 /*
37614@@ -3987,7 +3987,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
37615 for (j = 0; j < cy_card[i].nports; j++) {
37616 info = &cy_card[i].ports[j];
37617
37618- if (info->port.count) {
37619+ if (atomic_read(&info->port.count)) {
37620 /* XXX is the ldisc num worth this? */
37621 struct tty_struct *tty;
37622 struct tty_ldisc *ld;
37623diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
37624index 2d691eb..be02ebd 100644
37625--- a/drivers/tty/hvc/hvc_console.c
37626+++ b/drivers/tty/hvc/hvc_console.c
37627@@ -315,7 +315,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
37628
37629 spin_lock_irqsave(&hp->port.lock, flags);
37630 /* Check and then increment for fast path open. */
37631- if (hp->port.count++ > 0) {
37632+ if (atomic_inc_return(&hp->port.count) > 1) {
37633 spin_unlock_irqrestore(&hp->port.lock, flags);
37634 hvc_kick();
37635 return 0;
37636@@ -366,7 +366,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
37637
37638 spin_lock_irqsave(&hp->port.lock, flags);
37639
37640- if (--hp->port.count == 0) {
37641+ if (atomic_dec_return(&hp->port.count) == 0) {
37642 spin_unlock_irqrestore(&hp->port.lock, flags);
37643 /* We are done with the tty pointer now. */
37644 tty_port_tty_set(&hp->port, NULL);
37645@@ -384,9 +384,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
37646 */
37647 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
37648 } else {
37649- if (hp->port.count < 0)
37650+ if (atomic_read(&hp->port.count) < 0)
37651 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
37652- hp->vtermno, hp->port.count);
37653+ hp->vtermno, atomic_read(&hp->port.count));
37654 spin_unlock_irqrestore(&hp->port.lock, flags);
37655 }
37656
37657@@ -412,13 +412,13 @@ static void hvc_hangup(struct tty_struct *tty)
37658 * open->hangup case this can be called after the final close so prevent
37659 * that from happening for now.
37660 */
37661- if (hp->port.count <= 0) {
37662+ if (atomic_read(&hp->port.count) <= 0) {
37663 spin_unlock_irqrestore(&hp->port.lock, flags);
37664 return;
37665 }
37666
37667- temp_open_count = hp->port.count;
37668- hp->port.count = 0;
37669+ temp_open_count = atomic_read(&hp->port.count);
37670+ atomic_set(&hp->port.count, 0);
37671 spin_unlock_irqrestore(&hp->port.lock, flags);
37672 tty_port_tty_set(&hp->port, NULL);
37673
37674@@ -471,7 +471,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
37675 return -EPIPE;
37676
37677 /* FIXME what's this (unprotected) check for? */
37678- if (hp->port.count <= 0)
37679+ if (atomic_read(&hp->port.count) <= 0)
37680 return -EIO;
37681
37682 spin_lock_irqsave(&hp->lock, flags);
37683diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37684index d56788c..12d8f85 100644
37685--- a/drivers/tty/hvc/hvcs.c
37686+++ b/drivers/tty/hvc/hvcs.c
37687@@ -83,6 +83,7 @@
37688 #include <asm/hvcserver.h>
37689 #include <asm/uaccess.h>
37690 #include <asm/vio.h>
37691+#include <asm/local.h>
37692
37693 /*
37694 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37695@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37696
37697 spin_lock_irqsave(&hvcsd->lock, flags);
37698
37699- if (hvcsd->port.count > 0) {
37700+ if (atomic_read(&hvcsd->port.count) > 0) {
37701 spin_unlock_irqrestore(&hvcsd->lock, flags);
37702 printk(KERN_INFO "HVCS: vterm state unchanged. "
37703 "The hvcs device node is still in use.\n");
37704@@ -1134,7 +1135,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37705 if ((retval = hvcs_partner_connect(hvcsd)))
37706 goto error_release;
37707
37708- hvcsd->port.count = 1;
37709+ atomic_set(&hvcsd->port.count, 1);
37710 hvcsd->port.tty = tty;
37711 tty->driver_data = hvcsd;
37712
37713@@ -1168,7 +1169,7 @@ fast_open:
37714
37715 spin_lock_irqsave(&hvcsd->lock, flags);
37716 tty_port_get(&hvcsd->port);
37717- hvcsd->port.count++;
37718+ atomic_inc(&hvcsd->port.count);
37719 hvcsd->todo_mask |= HVCS_SCHED_READ;
37720 spin_unlock_irqrestore(&hvcsd->lock, flags);
37721
37722@@ -1212,7 +1213,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37723 hvcsd = tty->driver_data;
37724
37725 spin_lock_irqsave(&hvcsd->lock, flags);
37726- if (--hvcsd->port.count == 0) {
37727+ if (atomic_dec_and_test(&hvcsd->port.count)) {
37728
37729 vio_disable_interrupts(hvcsd->vdev);
37730
37731@@ -1238,10 +1239,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37732 free_irq(irq, hvcsd);
37733 tty_port_put(&hvcsd->port);
37734 return;
37735- } else if (hvcsd->port.count < 0) {
37736+ } else if (atomic_read(&hvcsd->port.count) < 0) {
37737 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37738 " is missmanaged.\n",
37739- hvcsd->vdev->unit_address, hvcsd->port.count);
37740+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
37741 }
37742
37743 spin_unlock_irqrestore(&hvcsd->lock, flags);
37744@@ -1257,7 +1258,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37745
37746 spin_lock_irqsave(&hvcsd->lock, flags);
37747 /* Preserve this so that we know how many kref refs to put */
37748- temp_open_count = hvcsd->port.count;
37749+ temp_open_count = atomic_read(&hvcsd->port.count);
37750
37751 /*
37752 * Don't kref put inside the spinlock because the destruction
37753@@ -1272,7 +1273,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37754 tty->driver_data = NULL;
37755 hvcsd->port.tty = NULL;
37756
37757- hvcsd->port.count = 0;
37758+ atomic_set(&hvcsd->port.count, 0);
37759
37760 /* This will drop any buffered data on the floor which is OK in a hangup
37761 * scenario. */
37762@@ -1343,7 +1344,7 @@ static int hvcs_write(struct tty_struct *tty,
37763 * the middle of a write operation? This is a crummy place to do this
37764 * but we want to keep it all in the spinlock.
37765 */
37766- if (hvcsd->port.count <= 0) {
37767+ if (atomic_read(&hvcsd->port.count) <= 0) {
37768 spin_unlock_irqrestore(&hvcsd->lock, flags);
37769 return -ENODEV;
37770 }
37771@@ -1417,7 +1418,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37772 {
37773 struct hvcs_struct *hvcsd = tty->driver_data;
37774
37775- if (!hvcsd || hvcsd->port.count <= 0)
37776+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
37777 return 0;
37778
37779 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37780diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37781index f8b5fa0..4ba9f89 100644
37782--- a/drivers/tty/ipwireless/tty.c
37783+++ b/drivers/tty/ipwireless/tty.c
37784@@ -29,6 +29,7 @@
37785 #include <linux/tty_driver.h>
37786 #include <linux/tty_flip.h>
37787 #include <linux/uaccess.h>
37788+#include <asm/local.h>
37789
37790 #include "tty.h"
37791 #include "network.h"
37792@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37793 mutex_unlock(&tty->ipw_tty_mutex);
37794 return -ENODEV;
37795 }
37796- if (tty->port.count == 0)
37797+ if (atomic_read(&tty->port.count) == 0)
37798 tty->tx_bytes_queued = 0;
37799
37800- tty->port.count++;
37801+ atomic_inc(&tty->port.count);
37802
37803 tty->port.tty = linux_tty;
37804 linux_tty->driver_data = tty;
37805@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37806
37807 static void do_ipw_close(struct ipw_tty *tty)
37808 {
37809- tty->port.count--;
37810-
37811- if (tty->port.count == 0) {
37812+ if (atomic_dec_return(&tty->port.count) == 0) {
37813 struct tty_struct *linux_tty = tty->port.tty;
37814
37815 if (linux_tty != NULL) {
37816@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37817 return;
37818
37819 mutex_lock(&tty->ipw_tty_mutex);
37820- if (tty->port.count == 0) {
37821+ if (atomic_read(&tty->port.count) == 0) {
37822 mutex_unlock(&tty->ipw_tty_mutex);
37823 return;
37824 }
37825@@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37826 return;
37827 }
37828
37829- if (!tty->port.count) {
37830+ if (!atomic_read(&tty->port.count)) {
37831 mutex_unlock(&tty->ipw_tty_mutex);
37832 return;
37833 }
37834@@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37835 return -ENODEV;
37836
37837 mutex_lock(&tty->ipw_tty_mutex);
37838- if (!tty->port.count) {
37839+ if (!atomic_read(&tty->port.count)) {
37840 mutex_unlock(&tty->ipw_tty_mutex);
37841 return -EINVAL;
37842 }
37843@@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37844 if (!tty)
37845 return -ENODEV;
37846
37847- if (!tty->port.count)
37848+ if (!atomic_read(&tty->port.count))
37849 return -EINVAL;
37850
37851 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37852@@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37853 if (!tty)
37854 return 0;
37855
37856- if (!tty->port.count)
37857+ if (!atomic_read(&tty->port.count))
37858 return 0;
37859
37860 return tty->tx_bytes_queued;
37861@@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37862 if (!tty)
37863 return -ENODEV;
37864
37865- if (!tty->port.count)
37866+ if (!atomic_read(&tty->port.count))
37867 return -EINVAL;
37868
37869 return get_control_lines(tty);
37870@@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37871 if (!tty)
37872 return -ENODEV;
37873
37874- if (!tty->port.count)
37875+ if (!atomic_read(&tty->port.count))
37876 return -EINVAL;
37877
37878 return set_control_lines(tty, set, clear);
37879@@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37880 if (!tty)
37881 return -ENODEV;
37882
37883- if (!tty->port.count)
37884+ if (!atomic_read(&tty->port.count))
37885 return -EINVAL;
37886
37887 /* FIXME: Exactly how is the tty object locked here .. */
37888@@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37889 * are gone */
37890 mutex_lock(&ttyj->ipw_tty_mutex);
37891 }
37892- while (ttyj->port.count)
37893+ while (atomic_read(&ttyj->port.count))
37894 do_ipw_close(ttyj);
37895 ipwireless_disassociate_network_ttys(network,
37896 ttyj->channel_idx);
37897diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
37898index 324467d..504cc25 100644
37899--- a/drivers/tty/moxa.c
37900+++ b/drivers/tty/moxa.c
37901@@ -1172,7 +1172,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
37902 }
37903
37904 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
37905- ch->port.count++;
37906+ atomic_inc(&ch->port.count);
37907 tty->driver_data = ch;
37908 tty_port_tty_set(&ch->port, tty);
37909 mutex_lock(&ch->port.mutex);
37910diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37911index c43b683..4dab83e 100644
37912--- a/drivers/tty/n_gsm.c
37913+++ b/drivers/tty/n_gsm.c
37914@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37915 kref_init(&dlci->ref);
37916 mutex_init(&dlci->mutex);
37917 dlci->fifo = &dlci->_fifo;
37918- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37919+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37920 kfree(dlci);
37921 return NULL;
37922 }
37923@@ -2895,7 +2895,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
37924 if (dlci == NULL)
37925 return -ENOMEM;
37926 port = &dlci->port;
37927- port->count++;
37928+ atomic_inc(&port->count);
37929 tty->driver_data = dlci;
37930 dlci_get(dlci);
37931 dlci_get(dlci->gsm->dlci[0]);
37932diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37933index ee1c268..0e97caf 100644
37934--- a/drivers/tty/n_tty.c
37935+++ b/drivers/tty/n_tty.c
37936@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37937 {
37938 *ops = tty_ldisc_N_TTY;
37939 ops->owner = NULL;
37940- ops->refcount = ops->flags = 0;
37941+ atomic_set(&ops->refcount, 0);
37942+ ops->flags = 0;
37943 }
37944 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37945diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37946index 5505ffc..7affff9 100644
37947--- a/drivers/tty/pty.c
37948+++ b/drivers/tty/pty.c
37949@@ -718,8 +718,10 @@ static void __init unix98_pty_init(void)
37950 panic("Couldn't register Unix98 pts driver");
37951
37952 /* Now create the /dev/ptmx special device */
37953+ pax_open_kernel();
37954 tty_default_fops(&ptmx_fops);
37955- ptmx_fops.open = ptmx_open;
37956+ *(void **)&ptmx_fops.open = ptmx_open;
37957+ pax_close_kernel();
37958
37959 cdev_init(&ptmx_cdev, &ptmx_fops);
37960 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37961diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
37962index 777d5f9..56d67ca 100644
37963--- a/drivers/tty/rocket.c
37964+++ b/drivers/tty/rocket.c
37965@@ -924,7 +924,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
37966 tty->driver_data = info;
37967 tty_port_tty_set(port, tty);
37968
37969- if (port->count++ == 0) {
37970+ if (atomic_inc_return(&port->count) == 1) {
37971 atomic_inc(&rp_num_ports_open);
37972
37973 #ifdef ROCKET_DEBUG_OPEN
37974@@ -933,7 +933,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
37975 #endif
37976 }
37977 #ifdef ROCKET_DEBUG_OPEN
37978- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
37979+	printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
37980 #endif
37981
37982 /*
37983@@ -1528,7 +1528,7 @@ static void rp_hangup(struct tty_struct *tty)
37984 spin_unlock_irqrestore(&info->port.lock, flags);
37985 return;
37986 }
37987- if (info->port.count)
37988+ if (atomic_read(&info->port.count))
37989 atomic_dec(&rp_num_ports_open);
37990 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
37991 spin_unlock_irqrestore(&info->port.lock, flags);
37992diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37993index 2b42a01..32a2ed3 100644
37994--- a/drivers/tty/serial/kgdboc.c
37995+++ b/drivers/tty/serial/kgdboc.c
37996@@ -24,8 +24,9 @@
37997 #define MAX_CONFIG_LEN 40
37998
37999 static struct kgdb_io kgdboc_io_ops;
38000+static struct kgdb_io kgdboc_io_ops_console;
38001
38002-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
38003+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
38004 static int configured = -1;
38005
38006 static char config[MAX_CONFIG_LEN];
38007@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
38008 kgdboc_unregister_kbd();
38009 if (configured == 1)
38010 kgdb_unregister_io_module(&kgdboc_io_ops);
38011+ else if (configured == 2)
38012+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
38013 }
38014
38015 static int configure_kgdboc(void)
38016@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
38017 int err;
38018 char *cptr = config;
38019 struct console *cons;
38020+ int is_console = 0;
38021
38022 err = kgdboc_option_setup(config);
38023 if (err || !strlen(config) || isspace(config[0]))
38024 goto noconfig;
38025
38026 err = -ENODEV;
38027- kgdboc_io_ops.is_console = 0;
38028 kgdb_tty_driver = NULL;
38029
38030 kgdboc_use_kms = 0;
38031@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
38032 int idx;
38033 if (cons->device && cons->device(cons, &idx) == p &&
38034 idx == tty_line) {
38035- kgdboc_io_ops.is_console = 1;
38036+ is_console = 1;
38037 break;
38038 }
38039 cons = cons->next;
38040@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
38041 kgdb_tty_line = tty_line;
38042
38043 do_register:
38044- err = kgdb_register_io_module(&kgdboc_io_ops);
38045+ if (is_console) {
38046+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
38047+ configured = 2;
38048+ } else {
38049+ err = kgdb_register_io_module(&kgdboc_io_ops);
38050+ configured = 1;
38051+ }
38052 if (err)
38053 goto noconfig;
38054
38055- configured = 1;
38056-
38057 return 0;
38058
38059 noconfig:
38060@@ -213,7 +220,7 @@ noconfig:
38061 static int __init init_kgdboc(void)
38062 {
38063 /* Already configured? */
38064- if (configured == 1)
38065+ if (configured >= 1)
38066 return 0;
38067
38068 return configure_kgdboc();
38069@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
38070 if (config[len - 1] == '\n')
38071 config[len - 1] = '\0';
38072
38073- if (configured == 1)
38074+ if (configured >= 1)
38075 cleanup_kgdboc();
38076
38077 /* Go and configure with the new params. */
38078@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
38079 .post_exception = kgdboc_post_exp_handler,
38080 };
38081
38082+static struct kgdb_io kgdboc_io_ops_console = {
38083+ .name = "kgdboc",
38084+ .read_char = kgdboc_get_char,
38085+ .write_char = kgdboc_put_char,
38086+ .pre_exception = kgdboc_pre_exp_handler,
38087+ .post_exception = kgdboc_post_exp_handler,
38088+ .is_console = 1
38089+};
38090+
38091 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
38092 /* This is only available if kgdboc is a built in for early debugging */
38093 static int __init kgdboc_early_init(char *opt)
38094diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
38095index 246b823..9e0db76 100644
38096--- a/drivers/tty/serial/serial_core.c
38097+++ b/drivers/tty/serial/serial_core.c
38098@@ -1392,7 +1392,7 @@ static void uart_hangup(struct tty_struct *tty)
38099 uart_flush_buffer(tty);
38100 uart_shutdown(tty, state);
38101 spin_lock_irqsave(&port->lock, flags);
38102- port->count = 0;
38103+ atomic_set(&port->count, 0);
38104 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
38105 spin_unlock_irqrestore(&port->lock, flags);
38106 tty_port_tty_set(port, NULL);
38107@@ -1488,7 +1488,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
38108 goto end;
38109 }
38110
38111- port->count++;
38112+ atomic_inc(&port->count);
38113 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
38114 retval = -ENXIO;
38115 goto err_dec_count;
38116@@ -1515,7 +1515,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
38117 /*
38118 * Make sure the device is in D0 state.
38119 */
38120- if (port->count == 1)
38121+ if (atomic_read(&port->count) == 1)
38122 uart_change_pm(state, 0);
38123
38124 /*
38125@@ -1533,7 +1533,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
38126 end:
38127 return retval;
38128 err_dec_count:
38129- port->count--;
38130+	atomic_dec(&port->count);
38131 mutex_unlock(&port->mutex);
38132 goto end;
38133 }
38134diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
38135index 593d40a..bdc61f3 100644
38136--- a/drivers/tty/synclink.c
38137+++ b/drivers/tty/synclink.c
38138@@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
38139
38140 if (debug_level >= DEBUG_LEVEL_INFO)
38141 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
38142- __FILE__,__LINE__, info->device_name, info->port.count);
38143+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
38144
38145 if (tty_port_close_start(&info->port, tty, filp) == 0)
38146 goto cleanup;
38147@@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
38148 cleanup:
38149 if (debug_level >= DEBUG_LEVEL_INFO)
38150 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
38151- tty->driver->name, info->port.count);
38152+ tty->driver->name, atomic_read(&info->port.count));
38153
38154 } /* end of mgsl_close() */
38155
38156@@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
38157
38158 mgsl_flush_buffer(tty);
38159 shutdown(info);
38160-
38161- info->port.count = 0;
38162+
38163+ atomic_set(&info->port.count, 0);
38164 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
38165 info->port.tty = NULL;
38166
38167@@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
38168
38169 if (debug_level >= DEBUG_LEVEL_INFO)
38170 printk("%s(%d):block_til_ready before block on %s count=%d\n",
38171- __FILE__,__LINE__, tty->driver->name, port->count );
38172+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38173
38174 spin_lock_irqsave(&info->irq_spinlock, flags);
38175 if (!tty_hung_up_p(filp)) {
38176 extra_count = true;
38177- port->count--;
38178+ atomic_dec(&port->count);
38179 }
38180 spin_unlock_irqrestore(&info->irq_spinlock, flags);
38181 port->blocked_open++;
38182@@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
38183
38184 if (debug_level >= DEBUG_LEVEL_INFO)
38185 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
38186- __FILE__,__LINE__, tty->driver->name, port->count );
38187+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38188
38189 tty_unlock();
38190 schedule();
38191@@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
38192
38193 /* FIXME: Racy on hangup during close wait */
38194 if (extra_count)
38195- port->count++;
38196+ atomic_inc(&port->count);
38197 port->blocked_open--;
38198
38199 if (debug_level >= DEBUG_LEVEL_INFO)
38200 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
38201- __FILE__,__LINE__, tty->driver->name, port->count );
38202+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38203
38204 if (!retval)
38205 port->flags |= ASYNC_NORMAL_ACTIVE;
38206@@ -3398,7 +3398,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
38207
38208 if (debug_level >= DEBUG_LEVEL_INFO)
38209 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
38210- __FILE__,__LINE__,tty->driver->name, info->port.count);
38211+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
38212
38213 /* If port is closing, signal caller to try again */
38214 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
38215@@ -3417,10 +3417,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
38216 spin_unlock_irqrestore(&info->netlock, flags);
38217 goto cleanup;
38218 }
38219- info->port.count++;
38220+ atomic_inc(&info->port.count);
38221 spin_unlock_irqrestore(&info->netlock, flags);
38222
38223- if (info->port.count == 1) {
38224+ if (atomic_read(&info->port.count) == 1) {
38225 /* 1st open on this device, init hardware */
38226 retval = startup(info);
38227 if (retval < 0)
38228@@ -3444,8 +3444,8 @@ cleanup:
38229 if (retval) {
38230 if (tty->count == 1)
38231 info->port.tty = NULL; /* tty layer will release tty struct */
38232- if(info->port.count)
38233- info->port.count--;
38234+ if (atomic_read(&info->port.count))
38235+ atomic_dec(&info->port.count);
38236 }
38237
38238 return retval;
38239@@ -7653,7 +7653,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38240 unsigned short new_crctype;
38241
38242 /* return error if TTY interface open */
38243- if (info->port.count)
38244+ if (atomic_read(&info->port.count))
38245 return -EBUSY;
38246
38247 switch (encoding)
38248@@ -7748,7 +7748,7 @@ static int hdlcdev_open(struct net_device *dev)
38249
38250 /* arbitrate between network and tty opens */
38251 spin_lock_irqsave(&info->netlock, flags);
38252- if (info->port.count != 0 || info->netcount != 0) {
38253+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38254 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
38255 spin_unlock_irqrestore(&info->netlock, flags);
38256 return -EBUSY;
38257@@ -7834,7 +7834,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38258 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
38259
38260 /* return error if TTY interface open */
38261- if (info->port.count)
38262+ if (atomic_read(&info->port.count))
38263 return -EBUSY;
38264
38265 if (cmd != SIOCWANDEV)
38266diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
38267index aa1debf..9297a16 100644
38268--- a/drivers/tty/synclink_gt.c
38269+++ b/drivers/tty/synclink_gt.c
38270@@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
38271 tty->driver_data = info;
38272 info->port.tty = tty;
38273
38274- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
38275+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
38276
38277 /* If port is closing, signal caller to try again */
38278 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
38279@@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
38280 mutex_unlock(&info->port.mutex);
38281 goto cleanup;
38282 }
38283- info->port.count++;
38284+ atomic_inc(&info->port.count);
38285 spin_unlock_irqrestore(&info->netlock, flags);
38286
38287- if (info->port.count == 1) {
38288+ if (atomic_read(&info->port.count) == 1) {
38289 /* 1st open on this device, init hardware */
38290 retval = startup(info);
38291 if (retval < 0) {
38292@@ -716,8 +716,8 @@ cleanup:
38293 if (retval) {
38294 if (tty->count == 1)
38295 info->port.tty = NULL; /* tty layer will release tty struct */
38296- if(info->port.count)
38297- info->port.count--;
38298+ if(atomic_read(&info->port.count))
38299+ atomic_dec(&info->port.count);
38300 }
38301
38302 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
38303@@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
38304
38305 if (sanity_check(info, tty->name, "close"))
38306 return;
38307- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
38308+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
38309
38310 if (tty_port_close_start(&info->port, tty, filp) == 0)
38311 goto cleanup;
38312@@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
38313 tty_port_close_end(&info->port, tty);
38314 info->port.tty = NULL;
38315 cleanup:
38316- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
38317+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
38318 }
38319
38320 static void hangup(struct tty_struct *tty)
38321@@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
38322 shutdown(info);
38323
38324 spin_lock_irqsave(&info->port.lock, flags);
38325- info->port.count = 0;
38326+ atomic_set(&info->port.count, 0);
38327 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
38328 info->port.tty = NULL;
38329 spin_unlock_irqrestore(&info->port.lock, flags);
38330@@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38331 unsigned short new_crctype;
38332
38333 /* return error if TTY interface open */
38334- if (info->port.count)
38335+ if (atomic_read(&info->port.count))
38336 return -EBUSY;
38337
38338 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
38339@@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
38340
38341 /* arbitrate between network and tty opens */
38342 spin_lock_irqsave(&info->netlock, flags);
38343- if (info->port.count != 0 || info->netcount != 0) {
38344+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38345 DBGINFO(("%s hdlc_open busy\n", dev->name));
38346 spin_unlock_irqrestore(&info->netlock, flags);
38347 return -EBUSY;
38348@@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38349 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
38350
38351 /* return error if TTY interface open */
38352- if (info->port.count)
38353+ if (atomic_read(&info->port.count))
38354 return -EBUSY;
38355
38356 if (cmd != SIOCWANDEV)
38357@@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
38358 if (port == NULL)
38359 continue;
38360 spin_lock(&port->lock);
38361- if ((port->port.count || port->netcount) &&
38362+ if ((atomic_read(&port->port.count) || port->netcount) &&
38363 port->pending_bh && !port->bh_running &&
38364 !port->bh_requested) {
38365 DBGISR(("%s bh queued\n", port->device_name));
38366@@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38367 spin_lock_irqsave(&info->lock, flags);
38368 if (!tty_hung_up_p(filp)) {
38369 extra_count = true;
38370- port->count--;
38371+ atomic_dec(&port->count);
38372 }
38373 spin_unlock_irqrestore(&info->lock, flags);
38374 port->blocked_open++;
38375@@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38376 remove_wait_queue(&port->open_wait, &wait);
38377
38378 if (extra_count)
38379- port->count++;
38380+ atomic_inc(&port->count);
38381 port->blocked_open--;
38382
38383 if (!retval)
38384diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
38385index a3dddc1..8905ab2 100644
38386--- a/drivers/tty/synclinkmp.c
38387+++ b/drivers/tty/synclinkmp.c
38388@@ -742,7 +742,7 @@ static int open(struct tty_struct *tty, struct file *filp)
38389
38390 if (debug_level >= DEBUG_LEVEL_INFO)
38391 printk("%s(%d):%s open(), old ref count = %d\n",
38392- __FILE__,__LINE__,tty->driver->name, info->port.count);
38393+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
38394
38395 /* If port is closing, signal caller to try again */
38396 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
38397@@ -761,10 +761,10 @@ static int open(struct tty_struct *tty, struct file *filp)
38398 spin_unlock_irqrestore(&info->netlock, flags);
38399 goto cleanup;
38400 }
38401- info->port.count++;
38402+ atomic_inc(&info->port.count);
38403 spin_unlock_irqrestore(&info->netlock, flags);
38404
38405- if (info->port.count == 1) {
38406+ if (atomic_read(&info->port.count) == 1) {
38407 /* 1st open on this device, init hardware */
38408 retval = startup(info);
38409 if (retval < 0)
38410@@ -788,8 +788,8 @@ cleanup:
38411 if (retval) {
38412 if (tty->count == 1)
38413 info->port.tty = NULL; /* tty layer will release tty struct */
38414- if(info->port.count)
38415- info->port.count--;
38416+ if(atomic_read(&info->port.count))
38417+ atomic_dec(&info->port.count);
38418 }
38419
38420 return retval;
38421@@ -807,7 +807,7 @@ static void close(struct tty_struct *tty, struct file *filp)
38422
38423 if (debug_level >= DEBUG_LEVEL_INFO)
38424 printk("%s(%d):%s close() entry, count=%d\n",
38425- __FILE__,__LINE__, info->device_name, info->port.count);
38426+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
38427
38428 if (tty_port_close_start(&info->port, tty, filp) == 0)
38429 goto cleanup;
38430@@ -826,7 +826,7 @@ static void close(struct tty_struct *tty, struct file *filp)
38431 cleanup:
38432 if (debug_level >= DEBUG_LEVEL_INFO)
38433 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
38434- tty->driver->name, info->port.count);
38435+ tty->driver->name, atomic_read(&info->port.count));
38436 }
38437
38438 /* Called by tty_hangup() when a hangup is signaled.
38439@@ -849,7 +849,7 @@ static void hangup(struct tty_struct *tty)
38440 shutdown(info);
38441
38442 spin_lock_irqsave(&info->port.lock, flags);
38443- info->port.count = 0;
38444+ atomic_set(&info->port.count, 0);
38445 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
38446 info->port.tty = NULL;
38447 spin_unlock_irqrestore(&info->port.lock, flags);
38448@@ -1557,7 +1557,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
38449 unsigned short new_crctype;
38450
38451 /* return error if TTY interface open */
38452- if (info->port.count)
38453+ if (atomic_read(&info->port.count))
38454 return -EBUSY;
38455
38456 switch (encoding)
38457@@ -1652,7 +1652,7 @@ static int hdlcdev_open(struct net_device *dev)
38458
38459 /* arbitrate between network and tty opens */
38460 spin_lock_irqsave(&info->netlock, flags);
38461- if (info->port.count != 0 || info->netcount != 0) {
38462+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
38463 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
38464 spin_unlock_irqrestore(&info->netlock, flags);
38465 return -EBUSY;
38466@@ -1738,7 +1738,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
38467 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
38468
38469 /* return error if TTY interface open */
38470- if (info->port.count)
38471+ if (atomic_read(&info->port.count))
38472 return -EBUSY;
38473
38474 if (cmd != SIOCWANDEV)
38475@@ -2623,7 +2623,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
38476 * do not request bottom half processing if the
38477 * device is not open in a normal mode.
38478 */
38479- if ( port && (port->port.count || port->netcount) &&
38480+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
38481 port->pending_bh && !port->bh_running &&
38482 !port->bh_requested ) {
38483 if ( debug_level >= DEBUG_LEVEL_ISR )
38484@@ -3321,12 +3321,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38485
38486 if (debug_level >= DEBUG_LEVEL_INFO)
38487 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
38488- __FILE__,__LINE__, tty->driver->name, port->count );
38489+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38490
38491 spin_lock_irqsave(&info->lock, flags);
38492 if (!tty_hung_up_p(filp)) {
38493 extra_count = true;
38494- port->count--;
38495+ atomic_dec(&port->count);
38496 }
38497 spin_unlock_irqrestore(&info->lock, flags);
38498 port->blocked_open++;
38499@@ -3355,7 +3355,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38500
38501 if (debug_level >= DEBUG_LEVEL_INFO)
38502 printk("%s(%d):%s block_til_ready() count=%d\n",
38503- __FILE__,__LINE__, tty->driver->name, port->count );
38504+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38505
38506 tty_unlock();
38507 schedule();
38508@@ -3366,12 +3366,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
38509 remove_wait_queue(&port->open_wait, &wait);
38510
38511 if (extra_count)
38512- port->count++;
38513+ atomic_inc(&port->count);
38514 port->blocked_open--;
38515
38516 if (debug_level >= DEBUG_LEVEL_INFO)
38517 printk("%s(%d):%s block_til_ready() after, count=%d\n",
38518- __FILE__,__LINE__, tty->driver->name, port->count );
38519+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
38520
38521 if (!retval)
38522 port->flags |= ASYNC_NORMAL_ACTIVE;
38523diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
38524index 05728894..b9d44c6 100644
38525--- a/drivers/tty/sysrq.c
38526+++ b/drivers/tty/sysrq.c
38527@@ -865,7 +865,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
38528 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
38529 size_t count, loff_t *ppos)
38530 {
38531- if (count) {
38532+ if (count && capable(CAP_SYS_ADMIN)) {
38533 char c;
38534
38535 if (get_user(c, buf))
38536diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
38537index b425c79..08a3f06 100644
38538--- a/drivers/tty/tty_io.c
38539+++ b/drivers/tty/tty_io.c
38540@@ -3283,7 +3283,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
38541
38542 void tty_default_fops(struct file_operations *fops)
38543 {
38544- *fops = tty_fops;
38545+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
38546 }
38547
38548 /*
38549diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
38550index 9911eb6..5abe0e1 100644
38551--- a/drivers/tty/tty_ldisc.c
38552+++ b/drivers/tty/tty_ldisc.c
38553@@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
38554 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
38555 struct tty_ldisc_ops *ldo = ld->ops;
38556
38557- ldo->refcount--;
38558+ atomic_dec(&ldo->refcount);
38559 module_put(ldo->owner);
38560 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38561
38562@@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
38563 spin_lock_irqsave(&tty_ldisc_lock, flags);
38564 tty_ldiscs[disc] = new_ldisc;
38565 new_ldisc->num = disc;
38566- new_ldisc->refcount = 0;
38567+ atomic_set(&new_ldisc->refcount, 0);
38568 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38569
38570 return ret;
38571@@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
38572 return -EINVAL;
38573
38574 spin_lock_irqsave(&tty_ldisc_lock, flags);
38575- if (tty_ldiscs[disc]->refcount)
38576+ if (atomic_read(&tty_ldiscs[disc]->refcount))
38577 ret = -EBUSY;
38578 else
38579 tty_ldiscs[disc] = NULL;
38580@@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
38581 if (ldops) {
38582 ret = ERR_PTR(-EAGAIN);
38583 if (try_module_get(ldops->owner)) {
38584- ldops->refcount++;
38585+ atomic_inc(&ldops->refcount);
38586 ret = ldops;
38587 }
38588 }
38589@@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
38590 unsigned long flags;
38591
38592 spin_lock_irqsave(&tty_ldisc_lock, flags);
38593- ldops->refcount--;
38594+ atomic_dec(&ldops->refcount);
38595 module_put(ldops->owner);
38596 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38597 }
38598diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
38599index bf6e238..d401c04 100644
38600--- a/drivers/tty/tty_port.c
38601+++ b/drivers/tty/tty_port.c
38602@@ -138,7 +138,7 @@ void tty_port_hangup(struct tty_port *port)
38603 unsigned long flags;
38604
38605 spin_lock_irqsave(&port->lock, flags);
38606- port->count = 0;
38607+ atomic_set(&port->count, 0);
38608 port->flags &= ~ASYNC_NORMAL_ACTIVE;
38609 if (port->tty) {
38610 set_bit(TTY_IO_ERROR, &port->tty->flags);
38611@@ -264,7 +264,7 @@ int tty_port_block_til_ready(struct tty_port *port,
38612 /* The port lock protects the port counts */
38613 spin_lock_irqsave(&port->lock, flags);
38614 if (!tty_hung_up_p(filp))
38615- port->count--;
38616+ atomic_dec(&port->count);
38617 port->blocked_open++;
38618 spin_unlock_irqrestore(&port->lock, flags);
38619
38620@@ -306,7 +306,7 @@ int tty_port_block_til_ready(struct tty_port *port,
38621 we must not mess that up further */
38622 spin_lock_irqsave(&port->lock, flags);
38623 if (!tty_hung_up_p(filp))
38624- port->count++;
38625+ atomic_inc(&port->count);
38626 port->blocked_open--;
38627 if (retval == 0)
38628 port->flags |= ASYNC_NORMAL_ACTIVE;
38629@@ -326,19 +326,19 @@ int tty_port_close_start(struct tty_port *port,
38630 return 0;
38631 }
38632
38633- if (tty->count == 1 && port->count != 1) {
38634+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
38635 printk(KERN_WARNING
38636 "tty_port_close_start: tty->count = 1 port count = %d.\n",
38637- port->count);
38638- port->count = 1;
38639+ atomic_read(&port->count));
38640+ atomic_set(&port->count, 1);
38641 }
38642- if (--port->count < 0) {
38643+ if (atomic_dec_return(&port->count) < 0) {
38644 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
38645- port->count);
38646- port->count = 0;
38647+ atomic_read(&port->count));
38648+ atomic_set(&port->count, 0);
38649 }
38650
38651- if (port->count) {
38652+ if (atomic_read(&port->count)) {
38653 spin_unlock_irqrestore(&port->lock, flags);
38654 if (port->ops->drop)
38655 port->ops->drop(port);
38656@@ -418,7 +418,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
38657 {
38658 spin_lock_irq(&port->lock);
38659 if (!tty_hung_up_p(filp))
38660- ++port->count;
38661+ atomic_inc(&port->count);
38662 spin_unlock_irq(&port->lock);
38663 tty_port_tty_set(port, tty);
38664
38665diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
38666index 48cc6f2..85584dd 100644
38667--- a/drivers/tty/vt/keyboard.c
38668+++ b/drivers/tty/vt/keyboard.c
38669@@ -659,6 +659,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
38670 kbd->kbdmode == VC_OFF) &&
38671 value != KVAL(K_SAK))
38672 return; /* SAK is allowed even in raw mode */
38673+
38674+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
38675+ {
38676+ void *func = fn_handler[value];
38677+ if (func == fn_show_state || func == fn_show_ptregs ||
38678+ func == fn_show_mem)
38679+ return;
38680+ }
38681+#endif
38682+
38683 fn_handler[value](vc);
38684 }
38685
38686@@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
38687 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
38688 return -EFAULT;
38689
38690- if (!capable(CAP_SYS_TTY_CONFIG))
38691- perm = 0;
38692-
38693 switch (cmd) {
38694 case KDGKBENT:
38695 /* Ensure another thread doesn't free it under us */
38696@@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
38697 spin_unlock_irqrestore(&kbd_event_lock, flags);
38698 return put_user(val, &user_kbe->kb_value);
38699 case KDSKBENT:
38700+ if (!capable(CAP_SYS_TTY_CONFIG))
38701+ perm = 0;
38702+
38703 if (!perm)
38704 return -EPERM;
38705 if (!i && v == K_NOSUCHMAP) {
38706@@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38707 int i, j, k;
38708 int ret;
38709
38710- if (!capable(CAP_SYS_TTY_CONFIG))
38711- perm = 0;
38712-
38713 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
38714 if (!kbs) {
38715 ret = -ENOMEM;
38716@@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38717 kfree(kbs);
38718 return ((p && *p) ? -EOVERFLOW : 0);
38719 case KDSKBSENT:
38720+ if (!capable(CAP_SYS_TTY_CONFIG))
38721+ perm = 0;
38722+
38723 if (!perm) {
38724 ret = -EPERM;
38725 goto reterr;
38726diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
38727index a783d53..cb30d94 100644
38728--- a/drivers/uio/uio.c
38729+++ b/drivers/uio/uio.c
38730@@ -25,6 +25,7 @@
38731 #include <linux/kobject.h>
38732 #include <linux/cdev.h>
38733 #include <linux/uio_driver.h>
38734+#include <asm/local.h>
38735
38736 #define UIO_MAX_DEVICES (1U << MINORBITS)
38737
38738@@ -32,10 +33,10 @@ struct uio_device {
38739 struct module *owner;
38740 struct device *dev;
38741 int minor;
38742- atomic_t event;
38743+ atomic_unchecked_t event;
38744 struct fasync_struct *async_queue;
38745 wait_queue_head_t wait;
38746- int vma_count;
38747+ local_t vma_count;
38748 struct uio_info *info;
38749 struct kobject *map_dir;
38750 struct kobject *portio_dir;
38751@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
38752 struct device_attribute *attr, char *buf)
38753 {
38754 struct uio_device *idev = dev_get_drvdata(dev);
38755- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38756+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38757 }
38758
38759 static struct device_attribute uio_class_attributes[] = {
38760@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
38761 {
38762 struct uio_device *idev = info->uio_dev;
38763
38764- atomic_inc(&idev->event);
38765+ atomic_inc_unchecked(&idev->event);
38766 wake_up_interruptible(&idev->wait);
38767 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38768 }
38769@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
38770 }
38771
38772 listener->dev = idev;
38773- listener->event_count = atomic_read(&idev->event);
38774+ listener->event_count = atomic_read_unchecked(&idev->event);
38775 filep->private_data = listener;
38776
38777 if (idev->info->open) {
38778@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
38779 return -EIO;
38780
38781 poll_wait(filep, &idev->wait, wait);
38782- if (listener->event_count != atomic_read(&idev->event))
38783+ if (listener->event_count != atomic_read_unchecked(&idev->event))
38784 return POLLIN | POLLRDNORM;
38785 return 0;
38786 }
38787@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
38788 do {
38789 set_current_state(TASK_INTERRUPTIBLE);
38790
38791- event_count = atomic_read(&idev->event);
38792+ event_count = atomic_read_unchecked(&idev->event);
38793 if (event_count != listener->event_count) {
38794 if (copy_to_user(buf, &event_count, count))
38795 retval = -EFAULT;
38796@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
38797 static void uio_vma_open(struct vm_area_struct *vma)
38798 {
38799 struct uio_device *idev = vma->vm_private_data;
38800- idev->vma_count++;
38801+ local_inc(&idev->vma_count);
38802 }
38803
38804 static void uio_vma_close(struct vm_area_struct *vma)
38805 {
38806 struct uio_device *idev = vma->vm_private_data;
38807- idev->vma_count--;
38808+ local_dec(&idev->vma_count);
38809 }
38810
38811 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38812@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
38813 idev->owner = owner;
38814 idev->info = info;
38815 init_waitqueue_head(&idev->wait);
38816- atomic_set(&idev->event, 0);
38817+ atomic_set_unchecked(&idev->event, 0);
38818
38819 ret = uio_get_minor(idev);
38820 if (ret)
38821diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
38822index b7eb86a..36d28af 100644
38823--- a/drivers/usb/atm/cxacru.c
38824+++ b/drivers/usb/atm/cxacru.c
38825@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
38826 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38827 if (ret < 2)
38828 return -EINVAL;
38829- if (index < 0 || index > 0x7f)
38830+ if (index > 0x7f)
38831 return -EINVAL;
38832 pos += tmp;
38833
38834diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38835index ee62b35..b663594 100644
38836--- a/drivers/usb/atm/usbatm.c
38837+++ b/drivers/usb/atm/usbatm.c
38838@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38839 if (printk_ratelimit())
38840 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38841 __func__, vpi, vci);
38842- atomic_inc(&vcc->stats->rx_err);
38843+ atomic_inc_unchecked(&vcc->stats->rx_err);
38844 return;
38845 }
38846
38847@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38848 if (length > ATM_MAX_AAL5_PDU) {
38849 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38850 __func__, length, vcc);
38851- atomic_inc(&vcc->stats->rx_err);
38852+ atomic_inc_unchecked(&vcc->stats->rx_err);
38853 goto out;
38854 }
38855
38856@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38857 if (sarb->len < pdu_length) {
38858 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38859 __func__, pdu_length, sarb->len, vcc);
38860- atomic_inc(&vcc->stats->rx_err);
38861+ atomic_inc_unchecked(&vcc->stats->rx_err);
38862 goto out;
38863 }
38864
38865 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38866 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38867 __func__, vcc);
38868- atomic_inc(&vcc->stats->rx_err);
38869+ atomic_inc_unchecked(&vcc->stats->rx_err);
38870 goto out;
38871 }
38872
38873@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38874 if (printk_ratelimit())
38875 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38876 __func__, length);
38877- atomic_inc(&vcc->stats->rx_drop);
38878+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38879 goto out;
38880 }
38881
38882@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38883
38884 vcc->push(vcc, skb);
38885
38886- atomic_inc(&vcc->stats->rx);
38887+ atomic_inc_unchecked(&vcc->stats->rx);
38888 out:
38889 skb_trim(sarb, 0);
38890 }
38891@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
38892 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38893
38894 usbatm_pop(vcc, skb);
38895- atomic_inc(&vcc->stats->tx);
38896+ atomic_inc_unchecked(&vcc->stats->tx);
38897
38898 skb = skb_dequeue(&instance->sndqueue);
38899 }
38900@@ -770,11 +770,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
38901 if (!left--)
38902 return sprintf(page,
38903 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38904- atomic_read(&atm_dev->stats.aal5.tx),
38905- atomic_read(&atm_dev->stats.aal5.tx_err),
38906- atomic_read(&atm_dev->stats.aal5.rx),
38907- atomic_read(&atm_dev->stats.aal5.rx_err),
38908- atomic_read(&atm_dev->stats.aal5.rx_drop));
38909+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38910+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38911+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38912+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38913+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38914
38915 if (!left--) {
38916 if (instance->disconnected)
38917diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38918index d956965..4179a77 100644
38919--- a/drivers/usb/core/devices.c
38920+++ b/drivers/usb/core/devices.c
38921@@ -126,7 +126,7 @@ static const char format_endpt[] =
38922 * time it gets called.
38923 */
38924 static struct device_connect_event {
38925- atomic_t count;
38926+ atomic_unchecked_t count;
38927 wait_queue_head_t wait;
38928 } device_event = {
38929 .count = ATOMIC_INIT(1),
38930@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38931
38932 void usbfs_conn_disc_event(void)
38933 {
38934- atomic_add(2, &device_event.count);
38935+ atomic_add_unchecked(2, &device_event.count);
38936 wake_up(&device_event.wait);
38937 }
38938
38939@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38940
38941 poll_wait(file, &device_event.wait, wait);
38942
38943- event_count = atomic_read(&device_event.count);
38944+ event_count = atomic_read_unchecked(&device_event.count);
38945 if (file->f_version != event_count) {
38946 file->f_version = event_count;
38947 return POLLIN | POLLRDNORM;
38948diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38949index 347bb05..63e1b73 100644
38950--- a/drivers/usb/early/ehci-dbgp.c
38951+++ b/drivers/usb/early/ehci-dbgp.c
38952@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38953
38954 #ifdef CONFIG_KGDB
38955 static struct kgdb_io kgdbdbgp_io_ops;
38956-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38957+static struct kgdb_io kgdbdbgp_io_ops_console;
38958+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38959 #else
38960 #define dbgp_kgdb_mode (0)
38961 #endif
38962@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38963 .write_char = kgdbdbgp_write_char,
38964 };
38965
38966+static struct kgdb_io kgdbdbgp_io_ops_console = {
38967+ .name = "kgdbdbgp",
38968+ .read_char = kgdbdbgp_read_char,
38969+ .write_char = kgdbdbgp_write_char,
38970+ .is_console = 1
38971+};
38972+
38973 static int kgdbdbgp_wait_time;
38974
38975 static int __init kgdbdbgp_parse_config(char *str)
38976@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38977 ptr++;
38978 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38979 }
38980- kgdb_register_io_module(&kgdbdbgp_io_ops);
38981- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38982+ if (early_dbgp_console.index != -1)
38983+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38984+ else
38985+ kgdb_register_io_module(&kgdbdbgp_io_ops);
38986
38987 return 0;
38988 }
38989diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
38990index 5b3f5ff..6e00893 100644
38991--- a/drivers/usb/gadget/u_serial.c
38992+++ b/drivers/usb/gadget/u_serial.c
38993@@ -731,9 +731,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
38994 spin_lock_irq(&port->port_lock);
38995
38996 /* already open? Great. */
38997- if (port->port.count) {
38998+ if (atomic_read(&port->port.count)) {
38999 status = 0;
39000- port->port.count++;
39001+ atomic_inc(&port->port.count);
39002
39003 /* currently opening/closing? wait ... */
39004 } else if (port->openclose) {
39005@@ -792,7 +792,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
39006 tty->driver_data = port;
39007 port->port.tty = tty;
39008
39009- port->port.count = 1;
39010+ atomic_set(&port->port.count, 1);
39011 port->openclose = false;
39012
39013 /* if connected, start the I/O stream */
39014@@ -834,11 +834,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
39015
39016 spin_lock_irq(&port->port_lock);
39017
39018- if (port->port.count != 1) {
39019- if (port->port.count == 0)
39020+ if (atomic_read(&port->port.count) != 1) {
39021+ if (atomic_read(&port->port.count) == 0)
39022 WARN_ON(1);
39023 else
39024- --port->port.count;
39025+ atomic_dec(&port->port.count);
39026 goto exit;
39027 }
39028
39029@@ -848,7 +848,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
39030 * and sleep if necessary
39031 */
39032 port->openclose = true;
39033- port->port.count = 0;
39034+ atomic_set(&port->port.count, 0);
39035
39036 gser = port->port_usb;
39037 if (gser && gser->disconnect)
39038@@ -1152,7 +1152,7 @@ static int gs_closed(struct gs_port *port)
39039 int cond;
39040
39041 spin_lock_irq(&port->port_lock);
39042- cond = (port->port.count == 0) && !port->openclose;
39043+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
39044 spin_unlock_irq(&port->port_lock);
39045 return cond;
39046 }
39047@@ -1265,7 +1265,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
39048 /* if it's already open, start I/O ... and notify the serial
39049 * protocol about open/close status (connect/disconnect).
39050 */
39051- if (port->port.count) {
39052+ if (atomic_read(&port->port.count)) {
39053 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
39054 gs_start_io(port);
39055 if (gser->connect)
39056@@ -1312,7 +1312,7 @@ void gserial_disconnect(struct gserial *gser)
39057
39058 port->port_usb = NULL;
39059 gser->ioport = NULL;
39060- if (port->port.count > 0 || port->openclose) {
39061+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
39062 wake_up_interruptible(&port->drain_wait);
39063 if (port->port.tty)
39064 tty_hangup(port->port.tty);
39065@@ -1328,7 +1328,7 @@ void gserial_disconnect(struct gserial *gser)
39066
39067 /* finally, free any unused/unusable I/O buffers */
39068 spin_lock_irqsave(&port->port_lock, flags);
39069- if (port->port.count == 0 && !port->openclose)
39070+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
39071 gs_buf_free(&port->port_write_buf);
39072 gs_free_requests(gser->out, &port->read_pool, NULL);
39073 gs_free_requests(gser->out, &port->read_queue, NULL);
39074diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
39075index b9cca6d..75c75df 100644
39076--- a/drivers/usb/serial/console.c
39077+++ b/drivers/usb/serial/console.c
39078@@ -127,7 +127,7 @@ static int usb_console_setup(struct console *co, char *options)
39079
39080 info->port = port;
39081
39082- ++port->port.count;
39083+ atomic_inc(&port->port.count);
39084 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
39085 if (serial->type->set_termios) {
39086 /*
39087@@ -177,7 +177,7 @@ static int usb_console_setup(struct console *co, char *options)
39088 }
39089 /* Now that any required fake tty operations are completed restore
39090 * the tty port count */
39091- --port->port.count;
39092+ atomic_dec(&port->port.count);
39093 /* The console is special in terms of closing the device so
39094 * indicate this port is now acting as a system console. */
39095 port->port.console = 1;
39096@@ -190,7 +190,7 @@ static int usb_console_setup(struct console *co, char *options)
39097 free_tty:
39098 kfree(tty);
39099 reset_open_count:
39100- port->port.count = 0;
39101+ atomic_set(&port->port.count, 0);
39102 usb_autopm_put_interface(serial->interface);
39103 error_get_interface:
39104 usb_serial_put(serial);
39105diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
39106index d6bea3e..60b250e 100644
39107--- a/drivers/usb/wusbcore/wa-hc.h
39108+++ b/drivers/usb/wusbcore/wa-hc.h
39109@@ -192,7 +192,7 @@ struct wahc {
39110 struct list_head xfer_delayed_list;
39111 spinlock_t xfer_list_lock;
39112 struct work_struct xfer_work;
39113- atomic_t xfer_id_count;
39114+ atomic_unchecked_t xfer_id_count;
39115 };
39116
39117
39118@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
39119 INIT_LIST_HEAD(&wa->xfer_delayed_list);
39120 spin_lock_init(&wa->xfer_list_lock);
39121 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
39122- atomic_set(&wa->xfer_id_count, 1);
39123+ atomic_set_unchecked(&wa->xfer_id_count, 1);
39124 }
39125
39126 /**
39127diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
39128index 57c01ab..8a05959 100644
39129--- a/drivers/usb/wusbcore/wa-xfer.c
39130+++ b/drivers/usb/wusbcore/wa-xfer.c
39131@@ -296,7 +296,7 @@ out:
39132 */
39133 static void wa_xfer_id_init(struct wa_xfer *xfer)
39134 {
39135- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
39136+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
39137 }
39138
39139 /*
39140diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
39141index 112156f..eb81154 100644
39142--- a/drivers/vhost/vhost.c
39143+++ b/drivers/vhost/vhost.c
39144@@ -635,7 +635,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
39145 return 0;
39146 }
39147
39148-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
39149+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
39150 {
39151 struct file *eventfp, *filep = NULL,
39152 *pollstart = NULL, *pollstop = NULL;
39153diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
39154index b0b2ac3..89a4399 100644
39155--- a/drivers/video/aty/aty128fb.c
39156+++ b/drivers/video/aty/aty128fb.c
39157@@ -148,7 +148,7 @@ enum {
39158 };
39159
39160 /* Must match above enum */
39161-static const char *r128_family[] __devinitdata = {
39162+static const char *r128_family[] __devinitconst = {
39163 "AGP",
39164 "PCI",
39165 "PRO AGP",
39166diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
39167index 2e471c2..f00eb3e 100644
39168--- a/drivers/video/console/fbcon.c
39169+++ b/drivers/video/console/fbcon.c
39170@@ -442,7 +442,7 @@ static int __init fb_console_setup(char *this_opt)
39171
39172 while ((options = strsep(&this_opt, ",")) != NULL) {
39173 if (!strncmp(options, "font:", 5))
39174- strcpy(fontname, options + 5);
39175+ strlcpy(fontname, options + 5, sizeof(fontname));
39176
39177 if (!strncmp(options, "scrollback:", 11)) {
39178 options += 11;
39179diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
39180index 5c3960d..15cf8fc 100644
39181--- a/drivers/video/fbcmap.c
39182+++ b/drivers/video/fbcmap.c
39183@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
39184 rc = -ENODEV;
39185 goto out;
39186 }
39187- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
39188- !info->fbops->fb_setcmap)) {
39189+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
39190 rc = -EINVAL;
39191 goto out1;
39192 }
39193diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
39194index 0dff12a..2ef47b3 100644
39195--- a/drivers/video/fbmem.c
39196+++ b/drivers/video/fbmem.c
39197@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
39198 image->dx += image->width + 8;
39199 }
39200 } else if (rotate == FB_ROTATE_UD) {
39201- for (x = 0; x < num && image->dx >= 0; x++) {
39202+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
39203 info->fbops->fb_imageblit(info, image);
39204 image->dx -= image->width + 8;
39205 }
39206@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
39207 image->dy += image->height + 8;
39208 }
39209 } else if (rotate == FB_ROTATE_CCW) {
39210- for (x = 0; x < num && image->dy >= 0; x++) {
39211+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
39212 info->fbops->fb_imageblit(info, image);
39213 image->dy -= image->height + 8;
39214 }
39215@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
39216 return -EFAULT;
39217 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
39218 return -EINVAL;
39219- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
39220+ if (con2fb.framebuffer >= FB_MAX)
39221 return -EINVAL;
39222 if (!registered_fb[con2fb.framebuffer])
39223 request_module("fb%d", con2fb.framebuffer);
39224diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
39225index 5a5d092..265c5ed 100644
39226--- a/drivers/video/geode/gx1fb_core.c
39227+++ b/drivers/video/geode/gx1fb_core.c
39228@@ -29,7 +29,7 @@ static int crt_option = 1;
39229 static char panel_option[32] = "";
39230
39231 /* Modes relevant to the GX1 (taken from modedb.c) */
39232-static const struct fb_videomode __devinitdata gx1_modedb[] = {
39233+static const struct fb_videomode __devinitconst gx1_modedb[] = {
39234 /* 640x480-60 VESA */
39235 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
39236 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
39237diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
39238index 0fad23f..0e9afa4 100644
39239--- a/drivers/video/gxt4500.c
39240+++ b/drivers/video/gxt4500.c
39241@@ -156,7 +156,7 @@ struct gxt4500_par {
39242 static char *mode_option;
39243
39244 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
39245-static const struct fb_videomode defaultmode __devinitdata = {
39246+static const struct fb_videomode defaultmode __devinitconst = {
39247 .refresh = 60,
39248 .xres = 1280,
39249 .yres = 1024,
39250@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
39251 return 0;
39252 }
39253
39254-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
39255+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
39256 .id = "IBM GXT4500P",
39257 .type = FB_TYPE_PACKED_PIXELS,
39258 .visual = FB_VISUAL_PSEUDOCOLOR,
39259diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
39260index 7672d2e..b56437f 100644
39261--- a/drivers/video/i810/i810_accel.c
39262+++ b/drivers/video/i810/i810_accel.c
39263@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
39264 }
39265 }
39266 printk("ringbuffer lockup!!!\n");
39267+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
39268 i810_report_error(mmio);
39269 par->dev_flags |= LOCKUP;
39270 info->pixmap.scan_align = 1;
39271diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
39272index b83f361..2b05a91 100644
39273--- a/drivers/video/i810/i810_main.c
39274+++ b/drivers/video/i810/i810_main.c
39275@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
39276 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
39277
39278 /* PCI */
39279-static const char *i810_pci_list[] __devinitdata = {
39280+static const char *i810_pci_list[] __devinitconst = {
39281 "Intel(R) 810 Framebuffer Device" ,
39282 "Intel(R) 810-DC100 Framebuffer Device" ,
39283 "Intel(R) 810E Framebuffer Device" ,
39284diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
39285index de36693..3c63fc2 100644
39286--- a/drivers/video/jz4740_fb.c
39287+++ b/drivers/video/jz4740_fb.c
39288@@ -136,7 +136,7 @@ struct jzfb {
39289 uint32_t pseudo_palette[16];
39290 };
39291
39292-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
39293+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
39294 .id = "JZ4740 FB",
39295 .type = FB_TYPE_PACKED_PIXELS,
39296 .visual = FB_VISUAL_TRUECOLOR,
39297diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
39298index 3c14e43..eafa544 100644
39299--- a/drivers/video/logo/logo_linux_clut224.ppm
39300+++ b/drivers/video/logo/logo_linux_clut224.ppm
39301@@ -1,1604 +1,1123 @@
39302 P3
39303-# Standard 224-color Linux logo
39304 80 80
39305 255
39306- 0 0 0 0 0 0 0 0 0 0 0 0
39307- 0 0 0 0 0 0 0 0 0 0 0 0
39308- 0 0 0 0 0 0 0 0 0 0 0 0
39309- 0 0 0 0 0 0 0 0 0 0 0 0
39310- 0 0 0 0 0 0 0 0 0 0 0 0
39311- 0 0 0 0 0 0 0 0 0 0 0 0
39312- 0 0 0 0 0 0 0 0 0 0 0 0
39313- 0 0 0 0 0 0 0 0 0 0 0 0
39314- 0 0 0 0 0 0 0 0 0 0 0 0
39315- 6 6 6 6 6 6 10 10 10 10 10 10
39316- 10 10 10 6 6 6 6 6 6 6 6 6
39317- 0 0 0 0 0 0 0 0 0 0 0 0
39318- 0 0 0 0 0 0 0 0 0 0 0 0
39319- 0 0 0 0 0 0 0 0 0 0 0 0
39320- 0 0 0 0 0 0 0 0 0 0 0 0
39321- 0 0 0 0 0 0 0 0 0 0 0 0
39322- 0 0 0 0 0 0 0 0 0 0 0 0
39323- 0 0 0 0 0 0 0 0 0 0 0 0
39324- 0 0 0 0 0 0 0 0 0 0 0 0
39325- 0 0 0 0 0 0 0 0 0 0 0 0
39326- 0 0 0 0 0 0 0 0 0 0 0 0
39327- 0 0 0 0 0 0 0 0 0 0 0 0
39328- 0 0 0 0 0 0 0 0 0 0 0 0
39329- 0 0 0 0 0 0 0 0 0 0 0 0
39330- 0 0 0 0 0 0 0 0 0 0 0 0
39331- 0 0 0 0 0 0 0 0 0 0 0 0
39332- 0 0 0 0 0 0 0 0 0 0 0 0
39333- 0 0 0 0 0 0 0 0 0 0 0 0
39334- 0 0 0 6 6 6 10 10 10 14 14 14
39335- 22 22 22 26 26 26 30 30 30 34 34 34
39336- 30 30 30 30 30 30 26 26 26 18 18 18
39337- 14 14 14 10 10 10 6 6 6 0 0 0
39338- 0 0 0 0 0 0 0 0 0 0 0 0
39339- 0 0 0 0 0 0 0 0 0 0 0 0
39340- 0 0 0 0 0 0 0 0 0 0 0 0
39341- 0 0 0 0 0 0 0 0 0 0 0 0
39342- 0 0 0 0 0 0 0 0 0 0 0 0
39343- 0 0 0 0 0 0 0 0 0 0 0 0
39344- 0 0 0 0 0 0 0 0 0 0 0 0
39345- 0 0 0 0 0 0 0 0 0 0 0 0
39346- 0 0 0 0 0 0 0 0 0 0 0 0
39347- 0 0 0 0 0 1 0 0 1 0 0 0
39348- 0 0 0 0 0 0 0 0 0 0 0 0
39349- 0 0 0 0 0 0 0 0 0 0 0 0
39350- 0 0 0 0 0 0 0 0 0 0 0 0
39351- 0 0 0 0 0 0 0 0 0 0 0 0
39352- 0 0 0 0 0 0 0 0 0 0 0 0
39353- 0 0 0 0 0 0 0 0 0 0 0 0
39354- 6 6 6 14 14 14 26 26 26 42 42 42
39355- 54 54 54 66 66 66 78 78 78 78 78 78
39356- 78 78 78 74 74 74 66 66 66 54 54 54
39357- 42 42 42 26 26 26 18 18 18 10 10 10
39358- 6 6 6 0 0 0 0 0 0 0 0 0
39359- 0 0 0 0 0 0 0 0 0 0 0 0
39360- 0 0 0 0 0 0 0 0 0 0 0 0
39361- 0 0 0 0 0 0 0 0 0 0 0 0
39362- 0 0 0 0 0 0 0 0 0 0 0 0
39363- 0 0 0 0 0 0 0 0 0 0 0 0
39364- 0 0 0 0 0 0 0 0 0 0 0 0
39365- 0 0 0 0 0 0 0 0 0 0 0 0
39366- 0 0 0 0 0 0 0 0 0 0 0 0
39367- 0 0 1 0 0 0 0 0 0 0 0 0
39368- 0 0 0 0 0 0 0 0 0 0 0 0
39369- 0 0 0 0 0 0 0 0 0 0 0 0
39370- 0 0 0 0 0 0 0 0 0 0 0 0
39371- 0 0 0 0 0 0 0 0 0 0 0 0
39372- 0 0 0 0 0 0 0 0 0 0 0 0
39373- 0 0 0 0 0 0 0 0 0 10 10 10
39374- 22 22 22 42 42 42 66 66 66 86 86 86
39375- 66 66 66 38 38 38 38 38 38 22 22 22
39376- 26 26 26 34 34 34 54 54 54 66 66 66
39377- 86 86 86 70 70 70 46 46 46 26 26 26
39378- 14 14 14 6 6 6 0 0 0 0 0 0
39379- 0 0 0 0 0 0 0 0 0 0 0 0
39380- 0 0 0 0 0 0 0 0 0 0 0 0
39381- 0 0 0 0 0 0 0 0 0 0 0 0
39382- 0 0 0 0 0 0 0 0 0 0 0 0
39383- 0 0 0 0 0 0 0 0 0 0 0 0
39384- 0 0 0 0 0 0 0 0 0 0 0 0
39385- 0 0 0 0 0 0 0 0 0 0 0 0
39386- 0 0 0 0 0 0 0 0 0 0 0 0
39387- 0 0 1 0 0 1 0 0 1 0 0 0
39388- 0 0 0 0 0 0 0 0 0 0 0 0
39389- 0 0 0 0 0 0 0 0 0 0 0 0
39390- 0 0 0 0 0 0 0 0 0 0 0 0
39391- 0 0 0 0 0 0 0 0 0 0 0 0
39392- 0 0 0 0 0 0 0 0 0 0 0 0
39393- 0 0 0 0 0 0 10 10 10 26 26 26
39394- 50 50 50 82 82 82 58 58 58 6 6 6
39395- 2 2 6 2 2 6 2 2 6 2 2 6
39396- 2 2 6 2 2 6 2 2 6 2 2 6
39397- 6 6 6 54 54 54 86 86 86 66 66 66
39398- 38 38 38 18 18 18 6 6 6 0 0 0
39399- 0 0 0 0 0 0 0 0 0 0 0 0
39400- 0 0 0 0 0 0 0 0 0 0 0 0
39401- 0 0 0 0 0 0 0 0 0 0 0 0
39402- 0 0 0 0 0 0 0 0 0 0 0 0
39403- 0 0 0 0 0 0 0 0 0 0 0 0
39404- 0 0 0 0 0 0 0 0 0 0 0 0
39405- 0 0 0 0 0 0 0 0 0 0 0 0
39406- 0 0 0 0 0 0 0 0 0 0 0 0
39407- 0 0 0 0 0 0 0 0 0 0 0 0
39408- 0 0 0 0 0 0 0 0 0 0 0 0
39409- 0 0 0 0 0 0 0 0 0 0 0 0
39410- 0 0 0 0 0 0 0 0 0 0 0 0
39411- 0 0 0 0 0 0 0 0 0 0 0 0
39412- 0 0 0 0 0 0 0 0 0 0 0 0
39413- 0 0 0 6 6 6 22 22 22 50 50 50
39414- 78 78 78 34 34 34 2 2 6 2 2 6
39415- 2 2 6 2 2 6 2 2 6 2 2 6
39416- 2 2 6 2 2 6 2 2 6 2 2 6
39417- 2 2 6 2 2 6 6 6 6 70 70 70
39418- 78 78 78 46 46 46 22 22 22 6 6 6
39419- 0 0 0 0 0 0 0 0 0 0 0 0
39420- 0 0 0 0 0 0 0 0 0 0 0 0
39421- 0 0 0 0 0 0 0 0 0 0 0 0
39422- 0 0 0 0 0 0 0 0 0 0 0 0
39423- 0 0 0 0 0 0 0 0 0 0 0 0
39424- 0 0 0 0 0 0 0 0 0 0 0 0
39425- 0 0 0 0 0 0 0 0 0 0 0 0
39426- 0 0 0 0 0 0 0 0 0 0 0 0
39427- 0 0 1 0 0 1 0 0 1 0 0 0
39428- 0 0 0 0 0 0 0 0 0 0 0 0
39429- 0 0 0 0 0 0 0 0 0 0 0 0
39430- 0 0 0 0 0 0 0 0 0 0 0 0
39431- 0 0 0 0 0 0 0 0 0 0 0 0
39432- 0 0 0 0 0 0 0 0 0 0 0 0
39433- 6 6 6 18 18 18 42 42 42 82 82 82
39434- 26 26 26 2 2 6 2 2 6 2 2 6
39435- 2 2 6 2 2 6 2 2 6 2 2 6
39436- 2 2 6 2 2 6 2 2 6 14 14 14
39437- 46 46 46 34 34 34 6 6 6 2 2 6
39438- 42 42 42 78 78 78 42 42 42 18 18 18
39439- 6 6 6 0 0 0 0 0 0 0 0 0
39440- 0 0 0 0 0 0 0 0 0 0 0 0
39441- 0 0 0 0 0 0 0 0 0 0 0 0
39442- 0 0 0 0 0 0 0 0 0 0 0 0
39443- 0 0 0 0 0 0 0 0 0 0 0 0
39444- 0 0 0 0 0 0 0 0 0 0 0 0
39445- 0 0 0 0 0 0 0 0 0 0 0 0
39446- 0 0 0 0 0 0 0 0 0 0 0 0
39447- 0 0 1 0 0 0 0 0 1 0 0 0
39448- 0 0 0 0 0 0 0 0 0 0 0 0
39449- 0 0 0 0 0 0 0 0 0 0 0 0
39450- 0 0 0 0 0 0 0 0 0 0 0 0
39451- 0 0 0 0 0 0 0 0 0 0 0 0
39452- 0 0 0 0 0 0 0 0 0 0 0 0
39453- 10 10 10 30 30 30 66 66 66 58 58 58
39454- 2 2 6 2 2 6 2 2 6 2 2 6
39455- 2 2 6 2 2 6 2 2 6 2 2 6
39456- 2 2 6 2 2 6 2 2 6 26 26 26
39457- 86 86 86 101 101 101 46 46 46 10 10 10
39458- 2 2 6 58 58 58 70 70 70 34 34 34
39459- 10 10 10 0 0 0 0 0 0 0 0 0
39460- 0 0 0 0 0 0 0 0 0 0 0 0
39461- 0 0 0 0 0 0 0 0 0 0 0 0
39462- 0 0 0 0 0 0 0 0 0 0 0 0
39463- 0 0 0 0 0 0 0 0 0 0 0 0
39464- 0 0 0 0 0 0 0 0 0 0 0 0
39465- 0 0 0 0 0 0 0 0 0 0 0 0
39466- 0 0 0 0 0 0 0 0 0 0 0 0
39467- 0 0 1 0 0 1 0 0 1 0 0 0
39468- 0 0 0 0 0 0 0 0 0 0 0 0
39469- 0 0 0 0 0 0 0 0 0 0 0 0
39470- 0 0 0 0 0 0 0 0 0 0 0 0
39471- 0 0 0 0 0 0 0 0 0 0 0 0
39472- 0 0 0 0 0 0 0 0 0 0 0 0
39473- 14 14 14 42 42 42 86 86 86 10 10 10
39474- 2 2 6 2 2 6 2 2 6 2 2 6
39475- 2 2 6 2 2 6 2 2 6 2 2 6
39476- 2 2 6 2 2 6 2 2 6 30 30 30
39477- 94 94 94 94 94 94 58 58 58 26 26 26
39478- 2 2 6 6 6 6 78 78 78 54 54 54
39479- 22 22 22 6 6 6 0 0 0 0 0 0
39480- 0 0 0 0 0 0 0 0 0 0 0 0
39481- 0 0 0 0 0 0 0 0 0 0 0 0
39482- 0 0 0 0 0 0 0 0 0 0 0 0
39483- 0 0 0 0 0 0 0 0 0 0 0 0
39484- 0 0 0 0 0 0 0 0 0 0 0 0
39485- 0 0 0 0 0 0 0 0 0 0 0 0
39486- 0 0 0 0 0 0 0 0 0 0 0 0
39487- 0 0 0 0 0 0 0 0 0 0 0 0
39488- 0 0 0 0 0 0 0 0 0 0 0 0
39489- 0 0 0 0 0 0 0 0 0 0 0 0
39490- 0 0 0 0 0 0 0 0 0 0 0 0
39491- 0 0 0 0 0 0 0 0 0 0 0 0
39492- 0 0 0 0 0 0 0 0 0 6 6 6
39493- 22 22 22 62 62 62 62 62 62 2 2 6
39494- 2 2 6 2 2 6 2 2 6 2 2 6
39495- 2 2 6 2 2 6 2 2 6 2 2 6
39496- 2 2 6 2 2 6 2 2 6 26 26 26
39497- 54 54 54 38 38 38 18 18 18 10 10 10
39498- 2 2 6 2 2 6 34 34 34 82 82 82
39499- 38 38 38 14 14 14 0 0 0 0 0 0
39500- 0 0 0 0 0 0 0 0 0 0 0 0
39501- 0 0 0 0 0 0 0 0 0 0 0 0
39502- 0 0 0 0 0 0 0 0 0 0 0 0
39503- 0 0 0 0 0 0 0 0 0 0 0 0
39504- 0 0 0 0 0 0 0 0 0 0 0 0
39505- 0 0 0 0 0 0 0 0 0 0 0 0
39506- 0 0 0 0 0 0 0 0 0 0 0 0
39507- 0 0 0 0 0 1 0 0 1 0 0 0
39508- 0 0 0 0 0 0 0 0 0 0 0 0
39509- 0 0 0 0 0 0 0 0 0 0 0 0
39510- 0 0 0 0 0 0 0 0 0 0 0 0
39511- 0 0 0 0 0 0 0 0 0 0 0 0
39512- 0 0 0 0 0 0 0 0 0 6 6 6
39513- 30 30 30 78 78 78 30 30 30 2 2 6
39514- 2 2 6 2 2 6 2 2 6 2 2 6
39515- 2 2 6 2 2 6 2 2 6 2 2 6
39516- 2 2 6 2 2 6 2 2 6 10 10 10
39517- 10 10 10 2 2 6 2 2 6 2 2 6
39518- 2 2 6 2 2 6 2 2 6 78 78 78
39519- 50 50 50 18 18 18 6 6 6 0 0 0
39520- 0 0 0 0 0 0 0 0 0 0 0 0
39521- 0 0 0 0 0 0 0 0 0 0 0 0
39522- 0 0 0 0 0 0 0 0 0 0 0 0
39523- 0 0 0 0 0 0 0 0 0 0 0 0
39524- 0 0 0 0 0 0 0 0 0 0 0 0
39525- 0 0 0 0 0 0 0 0 0 0 0 0
39526- 0 0 0 0 0 0 0 0 0 0 0 0
39527- 0 0 1 0 0 0 0 0 0 0 0 0
39528- 0 0 0 0 0 0 0 0 0 0 0 0
39529- 0 0 0 0 0 0 0 0 0 0 0 0
39530- 0 0 0 0 0 0 0 0 0 0 0 0
39531- 0 0 0 0 0 0 0 0 0 0 0 0
39532- 0 0 0 0 0 0 0 0 0 10 10 10
39533- 38 38 38 86 86 86 14 14 14 2 2 6
39534- 2 2 6 2 2 6 2 2 6 2 2 6
39535- 2 2 6 2 2 6 2 2 6 2 2 6
39536- 2 2 6 2 2 6 2 2 6 2 2 6
39537- 2 2 6 2 2 6 2 2 6 2 2 6
39538- 2 2 6 2 2 6 2 2 6 54 54 54
39539- 66 66 66 26 26 26 6 6 6 0 0 0
39540- 0 0 0 0 0 0 0 0 0 0 0 0
39541- 0 0 0 0 0 0 0 0 0 0 0 0
39542- 0 0 0 0 0 0 0 0 0 0 0 0
39543- 0 0 0 0 0 0 0 0 0 0 0 0
39544- 0 0 0 0 0 0 0 0 0 0 0 0
39545- 0 0 0 0 0 0 0 0 0 0 0 0
39546- 0 0 0 0 0 0 0 0 0 0 0 0
39547- 0 0 0 0 0 1 0 0 1 0 0 0
39548- 0 0 0 0 0 0 0 0 0 0 0 0
39549- 0 0 0 0 0 0 0 0 0 0 0 0
39550- 0 0 0 0 0 0 0 0 0 0 0 0
39551- 0 0 0 0 0 0 0 0 0 0 0 0
39552- 0 0 0 0 0 0 0 0 0 14 14 14
39553- 42 42 42 82 82 82 2 2 6 2 2 6
39554- 2 2 6 6 6 6 10 10 10 2 2 6
39555- 2 2 6 2 2 6 2 2 6 2 2 6
39556- 2 2 6 2 2 6 2 2 6 6 6 6
39557- 14 14 14 10 10 10 2 2 6 2 2 6
39558- 2 2 6 2 2 6 2 2 6 18 18 18
39559- 82 82 82 34 34 34 10 10 10 0 0 0
39560- 0 0 0 0 0 0 0 0 0 0 0 0
39561- 0 0 0 0 0 0 0 0 0 0 0 0
39562- 0 0 0 0 0 0 0 0 0 0 0 0
39563- 0 0 0 0 0 0 0 0 0 0 0 0
39564- 0 0 0 0 0 0 0 0 0 0 0 0
39565- 0 0 0 0 0 0 0 0 0 0 0 0
39566- 0 0 0 0 0 0 0 0 0 0 0 0
39567- 0 0 1 0 0 0 0 0 0 0 0 0
39568- 0 0 0 0 0 0 0 0 0 0 0 0
39569- 0 0 0 0 0 0 0 0 0 0 0 0
39570- 0 0 0 0 0 0 0 0 0 0 0 0
39571- 0 0 0 0 0 0 0 0 0 0 0 0
39572- 0 0 0 0 0 0 0 0 0 14 14 14
39573- 46 46 46 86 86 86 2 2 6 2 2 6
39574- 6 6 6 6 6 6 22 22 22 34 34 34
39575- 6 6 6 2 2 6 2 2 6 2 2 6
39576- 2 2 6 2 2 6 18 18 18 34 34 34
39577- 10 10 10 50 50 50 22 22 22 2 2 6
39578- 2 2 6 2 2 6 2 2 6 10 10 10
39579- 86 86 86 42 42 42 14 14 14 0 0 0
39580- 0 0 0 0 0 0 0 0 0 0 0 0
39581- 0 0 0 0 0 0 0 0 0 0 0 0
39582- 0 0 0 0 0 0 0 0 0 0 0 0
39583- 0 0 0 0 0 0 0 0 0 0 0 0
39584- 0 0 0 0 0 0 0 0 0 0 0 0
39585- 0 0 0 0 0 0 0 0 0 0 0 0
39586- 0 0 0 0 0 0 0 0 0 0 0 0
39587- 0 0 1 0 0 1 0 0 1 0 0 0
39588- 0 0 0 0 0 0 0 0 0 0 0 0
39589- 0 0 0 0 0 0 0 0 0 0 0 0
39590- 0 0 0 0 0 0 0 0 0 0 0 0
39591- 0 0 0 0 0 0 0 0 0 0 0 0
39592- 0 0 0 0 0 0 0 0 0 14 14 14
39593- 46 46 46 86 86 86 2 2 6 2 2 6
39594- 38 38 38 116 116 116 94 94 94 22 22 22
39595- 22 22 22 2 2 6 2 2 6 2 2 6
39596- 14 14 14 86 86 86 138 138 138 162 162 162
39597-154 154 154 38 38 38 26 26 26 6 6 6
39598- 2 2 6 2 2 6 2 2 6 2 2 6
39599- 86 86 86 46 46 46 14 14 14 0 0 0
39600- 0 0 0 0 0 0 0 0 0 0 0 0
39601- 0 0 0 0 0 0 0 0 0 0 0 0
39602- 0 0 0 0 0 0 0 0 0 0 0 0
39603- 0 0 0 0 0 0 0 0 0 0 0 0
39604- 0 0 0 0 0 0 0 0 0 0 0 0
39605- 0 0 0 0 0 0 0 0 0 0 0 0
39606- 0 0 0 0 0 0 0 0 0 0 0 0
39607- 0 0 0 0 0 0 0 0 0 0 0 0
39608- 0 0 0 0 0 0 0 0 0 0 0 0
39609- 0 0 0 0 0 0 0 0 0 0 0 0
39610- 0 0 0 0 0 0 0 0 0 0 0 0
39611- 0 0 0 0 0 0 0 0 0 0 0 0
39612- 0 0 0 0 0 0 0 0 0 14 14 14
39613- 46 46 46 86 86 86 2 2 6 14 14 14
39614-134 134 134 198 198 198 195 195 195 116 116 116
39615- 10 10 10 2 2 6 2 2 6 6 6 6
39616-101 98 89 187 187 187 210 210 210 218 218 218
39617-214 214 214 134 134 134 14 14 14 6 6 6
39618- 2 2 6 2 2 6 2 2 6 2 2 6
39619- 86 86 86 50 50 50 18 18 18 6 6 6
39620- 0 0 0 0 0 0 0 0 0 0 0 0
39621- 0 0 0 0 0 0 0 0 0 0 0 0
39622- 0 0 0 0 0 0 0 0 0 0 0 0
39623- 0 0 0 0 0 0 0 0 0 0 0 0
39624- 0 0 0 0 0 0 0 0 0 0 0 0
39625- 0 0 0 0 0 0 0 0 0 0 0 0
39626- 0 0 0 0 0 0 0 0 1 0 0 0
39627- 0 0 1 0 0 1 0 0 1 0 0 0
39628- 0 0 0 0 0 0 0 0 0 0 0 0
39629- 0 0 0 0 0 0 0 0 0 0 0 0
39630- 0 0 0 0 0 0 0 0 0 0 0 0
39631- 0 0 0 0 0 0 0 0 0 0 0 0
39632- 0 0 0 0 0 0 0 0 0 14 14 14
39633- 46 46 46 86 86 86 2 2 6 54 54 54
39634-218 218 218 195 195 195 226 226 226 246 246 246
39635- 58 58 58 2 2 6 2 2 6 30 30 30
39636-210 210 210 253 253 253 174 174 174 123 123 123
39637-221 221 221 234 234 234 74 74 74 2 2 6
39638- 2 2 6 2 2 6 2 2 6 2 2 6
39639- 70 70 70 58 58 58 22 22 22 6 6 6
39640- 0 0 0 0 0 0 0 0 0 0 0 0
39641- 0 0 0 0 0 0 0 0 0 0 0 0
39642- 0 0 0 0 0 0 0 0 0 0 0 0
39643- 0 0 0 0 0 0 0 0 0 0 0 0
39644- 0 0 0 0 0 0 0 0 0 0 0 0
39645- 0 0 0 0 0 0 0 0 0 0 0 0
39646- 0 0 0 0 0 0 0 0 0 0 0 0
39647- 0 0 0 0 0 0 0 0 0 0 0 0
39648- 0 0 0 0 0 0 0 0 0 0 0 0
39649- 0 0 0 0 0 0 0 0 0 0 0 0
39650- 0 0 0 0 0 0 0 0 0 0 0 0
39651- 0 0 0 0 0 0 0 0 0 0 0 0
39652- 0 0 0 0 0 0 0 0 0 14 14 14
39653- 46 46 46 82 82 82 2 2 6 106 106 106
39654-170 170 170 26 26 26 86 86 86 226 226 226
39655-123 123 123 10 10 10 14 14 14 46 46 46
39656-231 231 231 190 190 190 6 6 6 70 70 70
39657- 90 90 90 238 238 238 158 158 158 2 2 6
39658- 2 2 6 2 2 6 2 2 6 2 2 6
39659- 70 70 70 58 58 58 22 22 22 6 6 6
39660- 0 0 0 0 0 0 0 0 0 0 0 0
39661- 0 0 0 0 0 0 0 0 0 0 0 0
39662- 0 0 0 0 0 0 0 0 0 0 0 0
39663- 0 0 0 0 0 0 0 0 0 0 0 0
39664- 0 0 0 0 0 0 0 0 0 0 0 0
39665- 0 0 0 0 0 0 0 0 0 0 0 0
39666- 0 0 0 0 0 0 0 0 1 0 0 0
39667- 0 0 1 0 0 1 0 0 1 0 0 0
39668- 0 0 0 0 0 0 0 0 0 0 0 0
39669- 0 0 0 0 0 0 0 0 0 0 0 0
39670- 0 0 0 0 0 0 0 0 0 0 0 0
39671- 0 0 0 0 0 0 0 0 0 0 0 0
39672- 0 0 0 0 0 0 0 0 0 14 14 14
39673- 42 42 42 86 86 86 6 6 6 116 116 116
39674-106 106 106 6 6 6 70 70 70 149 149 149
39675-128 128 128 18 18 18 38 38 38 54 54 54
39676-221 221 221 106 106 106 2 2 6 14 14 14
39677- 46 46 46 190 190 190 198 198 198 2 2 6
39678- 2 2 6 2 2 6 2 2 6 2 2 6
39679- 74 74 74 62 62 62 22 22 22 6 6 6
39680- 0 0 0 0 0 0 0 0 0 0 0 0
39681- 0 0 0 0 0 0 0 0 0 0 0 0
39682- 0 0 0 0 0 0 0 0 0 0 0 0
39683- 0 0 0 0 0 0 0 0 0 0 0 0
39684- 0 0 0 0 0 0 0 0 0 0 0 0
39685- 0 0 0 0 0 0 0 0 0 0 0 0
39686- 0 0 0 0 0 0 0 0 1 0 0 0
39687- 0 0 1 0 0 0 0 0 1 0 0 0
39688- 0 0 0 0 0 0 0 0 0 0 0 0
39689- 0 0 0 0 0 0 0 0 0 0 0 0
39690- 0 0 0 0 0 0 0 0 0 0 0 0
39691- 0 0 0 0 0 0 0 0 0 0 0 0
39692- 0 0 0 0 0 0 0 0 0 14 14 14
39693- 42 42 42 94 94 94 14 14 14 101 101 101
39694-128 128 128 2 2 6 18 18 18 116 116 116
39695-118 98 46 121 92 8 121 92 8 98 78 10
39696-162 162 162 106 106 106 2 2 6 2 2 6
39697- 2 2 6 195 195 195 195 195 195 6 6 6
39698- 2 2 6 2 2 6 2 2 6 2 2 6
39699- 74 74 74 62 62 62 22 22 22 6 6 6
39700- 0 0 0 0 0 0 0 0 0 0 0 0
39701- 0 0 0 0 0 0 0 0 0 0 0 0
39702- 0 0 0 0 0 0 0 0 0 0 0 0
39703- 0 0 0 0 0 0 0 0 0 0 0 0
39704- 0 0 0 0 0 0 0 0 0 0 0 0
39705- 0 0 0 0 0 0 0 0 0 0 0 0
39706- 0 0 0 0 0 0 0 0 1 0 0 1
39707- 0 0 1 0 0 0 0 0 1 0 0 0
39708- 0 0 0 0 0 0 0 0 0 0 0 0
39709- 0 0 0 0 0 0 0 0 0 0 0 0
39710- 0 0 0 0 0 0 0 0 0 0 0 0
39711- 0 0 0 0 0 0 0 0 0 0 0 0
39712- 0 0 0 0 0 0 0 0 0 10 10 10
39713- 38 38 38 90 90 90 14 14 14 58 58 58
39714-210 210 210 26 26 26 54 38 6 154 114 10
39715-226 170 11 236 186 11 225 175 15 184 144 12
39716-215 174 15 175 146 61 37 26 9 2 2 6
39717- 70 70 70 246 246 246 138 138 138 2 2 6
39718- 2 2 6 2 2 6 2 2 6 2 2 6
39719- 70 70 70 66 66 66 26 26 26 6 6 6
39720- 0 0 0 0 0 0 0 0 0 0 0 0
39721- 0 0 0 0 0 0 0 0 0 0 0 0
39722- 0 0 0 0 0 0 0 0 0 0 0 0
39723- 0 0 0 0 0 0 0 0 0 0 0 0
39724- 0 0 0 0 0 0 0 0 0 0 0 0
39725- 0 0 0 0 0 0 0 0 0 0 0 0
39726- 0 0 0 0 0 0 0 0 0 0 0 0
39727- 0 0 0 0 0 0 0 0 0 0 0 0
39728- 0 0 0 0 0 0 0 0 0 0 0 0
39729- 0 0 0 0 0 0 0 0 0 0 0 0
39730- 0 0 0 0 0 0 0 0 0 0 0 0
39731- 0 0 0 0 0 0 0 0 0 0 0 0
39732- 0 0 0 0 0 0 0 0 0 10 10 10
39733- 38 38 38 86 86 86 14 14 14 10 10 10
39734-195 195 195 188 164 115 192 133 9 225 175 15
39735-239 182 13 234 190 10 232 195 16 232 200 30
39736-245 207 45 241 208 19 232 195 16 184 144 12
39737-218 194 134 211 206 186 42 42 42 2 2 6
39738- 2 2 6 2 2 6 2 2 6 2 2 6
39739- 50 50 50 74 74 74 30 30 30 6 6 6
39740- 0 0 0 0 0 0 0 0 0 0 0 0
39741- 0 0 0 0 0 0 0 0 0 0 0 0
39742- 0 0 0 0 0 0 0 0 0 0 0 0
39743- 0 0 0 0 0 0 0 0 0 0 0 0
39744- 0 0 0 0 0 0 0 0 0 0 0 0
39745- 0 0 0 0 0 0 0 0 0 0 0 0
39746- 0 0 0 0 0 0 0 0 0 0 0 0
39747- 0 0 0 0 0 0 0 0 0 0 0 0
39748- 0 0 0 0 0 0 0 0 0 0 0 0
39749- 0 0 0 0 0 0 0 0 0 0 0 0
39750- 0 0 0 0 0 0 0 0 0 0 0 0
39751- 0 0 0 0 0 0 0 0 0 0 0 0
39752- 0 0 0 0 0 0 0 0 0 10 10 10
39753- 34 34 34 86 86 86 14 14 14 2 2 6
39754-121 87 25 192 133 9 219 162 10 239 182 13
39755-236 186 11 232 195 16 241 208 19 244 214 54
39756-246 218 60 246 218 38 246 215 20 241 208 19
39757-241 208 19 226 184 13 121 87 25 2 2 6
39758- 2 2 6 2 2 6 2 2 6 2 2 6
39759- 50 50 50 82 82 82 34 34 34 10 10 10
39760- 0 0 0 0 0 0 0 0 0 0 0 0
39761- 0 0 0 0 0 0 0 0 0 0 0 0
39762- 0 0 0 0 0 0 0 0 0 0 0 0
39763- 0 0 0 0 0 0 0 0 0 0 0 0
39764- 0 0 0 0 0 0 0 0 0 0 0 0
39765- 0 0 0 0 0 0 0 0 0 0 0 0
39766- 0 0 0 0 0 0 0 0 0 0 0 0
39767- 0 0 0 0 0 0 0 0 0 0 0 0
39768- 0 0 0 0 0 0 0 0 0 0 0 0
39769- 0 0 0 0 0 0 0 0 0 0 0 0
39770- 0 0 0 0 0 0 0 0 0 0 0 0
39771- 0 0 0 0 0 0 0 0 0 0 0 0
39772- 0 0 0 0 0 0 0 0 0 10 10 10
39773- 34 34 34 82 82 82 30 30 30 61 42 6
39774-180 123 7 206 145 10 230 174 11 239 182 13
39775-234 190 10 238 202 15 241 208 19 246 218 74
39776-246 218 38 246 215 20 246 215 20 246 215 20
39777-226 184 13 215 174 15 184 144 12 6 6 6
39778- 2 2 6 2 2 6 2 2 6 2 2 6
39779- 26 26 26 94 94 94 42 42 42 14 14 14
39780- 0 0 0 0 0 0 0 0 0 0 0 0
39781- 0 0 0 0 0 0 0 0 0 0 0 0
39782- 0 0 0 0 0 0 0 0 0 0 0 0
39783- 0 0 0 0 0 0 0 0 0 0 0 0
39784- 0 0 0 0 0 0 0 0 0 0 0 0
39785- 0 0 0 0 0 0 0 0 0 0 0 0
39786- 0 0 0 0 0 0 0 0 0 0 0 0
39787- 0 0 0 0 0 0 0 0 0 0 0 0
39788- 0 0 0 0 0 0 0 0 0 0 0 0
39789- 0 0 0 0 0 0 0 0 0 0 0 0
39790- 0 0 0 0 0 0 0 0 0 0 0 0
39791- 0 0 0 0 0 0 0 0 0 0 0 0
39792- 0 0 0 0 0 0 0 0 0 10 10 10
39793- 30 30 30 78 78 78 50 50 50 104 69 6
39794-192 133 9 216 158 10 236 178 12 236 186 11
39795-232 195 16 241 208 19 244 214 54 245 215 43
39796-246 215 20 246 215 20 241 208 19 198 155 10
39797-200 144 11 216 158 10 156 118 10 2 2 6
39798- 2 2 6 2 2 6 2 2 6 2 2 6
39799- 6 6 6 90 90 90 54 54 54 18 18 18
39800- 6 6 6 0 0 0 0 0 0 0 0 0
39801- 0 0 0 0 0 0 0 0 0 0 0 0
39802- 0 0 0 0 0 0 0 0 0 0 0 0
39803- 0 0 0 0 0 0 0 0 0 0 0 0
39804- 0 0 0 0 0 0 0 0 0 0 0 0
39805- 0 0 0 0 0 0 0 0 0 0 0 0
39806- 0 0 0 0 0 0 0 0 0 0 0 0
39807- 0 0 0 0 0 0 0 0 0 0 0 0
39808- 0 0 0 0 0 0 0 0 0 0 0 0
39809- 0 0 0 0 0 0 0 0 0 0 0 0
39810- 0 0 0 0 0 0 0 0 0 0 0 0
39811- 0 0 0 0 0 0 0 0 0 0 0 0
39812- 0 0 0 0 0 0 0 0 0 10 10 10
39813- 30 30 30 78 78 78 46 46 46 22 22 22
39814-137 92 6 210 162 10 239 182 13 238 190 10
39815-238 202 15 241 208 19 246 215 20 246 215 20
39816-241 208 19 203 166 17 185 133 11 210 150 10
39817-216 158 10 210 150 10 102 78 10 2 2 6
39818- 6 6 6 54 54 54 14 14 14 2 2 6
39819- 2 2 6 62 62 62 74 74 74 30 30 30
39820- 10 10 10 0 0 0 0 0 0 0 0 0
39821- 0 0 0 0 0 0 0 0 0 0 0 0
39822- 0 0 0 0 0 0 0 0 0 0 0 0
39823- 0 0 0 0 0 0 0 0 0 0 0 0
39824- 0 0 0 0 0 0 0 0 0 0 0 0
39825- 0 0 0 0 0 0 0 0 0 0 0 0
39826- 0 0 0 0 0 0 0 0 0 0 0 0
39827- 0 0 0 0 0 0 0 0 0 0 0 0
39828- 0 0 0 0 0 0 0 0 0 0 0 0
39829- 0 0 0 0 0 0 0 0 0 0 0 0
39830- 0 0 0 0 0 0 0 0 0 0 0 0
39831- 0 0 0 0 0 0 0 0 0 0 0 0
39832- 0 0 0 0 0 0 0 0 0 10 10 10
39833- 34 34 34 78 78 78 50 50 50 6 6 6
39834- 94 70 30 139 102 15 190 146 13 226 184 13
39835-232 200 30 232 195 16 215 174 15 190 146 13
39836-168 122 10 192 133 9 210 150 10 213 154 11
39837-202 150 34 182 157 106 101 98 89 2 2 6
39838- 2 2 6 78 78 78 116 116 116 58 58 58
39839- 2 2 6 22 22 22 90 90 90 46 46 46
39840- 18 18 18 6 6 6 0 0 0 0 0 0
39841- 0 0 0 0 0 0 0 0 0 0 0 0
39842- 0 0 0 0 0 0 0 0 0 0 0 0
39843- 0 0 0 0 0 0 0 0 0 0 0 0
39844- 0 0 0 0 0 0 0 0 0 0 0 0
39845- 0 0 0 0 0 0 0 0 0 0 0 0
39846- 0 0 0 0 0 0 0 0 0 0 0 0
39847- 0 0 0 0 0 0 0 0 0 0 0 0
39848- 0 0 0 0 0 0 0 0 0 0 0 0
39849- 0 0 0 0 0 0 0 0 0 0 0 0
39850- 0 0 0 0 0 0 0 0 0 0 0 0
39851- 0 0 0 0 0 0 0 0 0 0 0 0
39852- 0 0 0 0 0 0 0 0 0 10 10 10
39853- 38 38 38 86 86 86 50 50 50 6 6 6
39854-128 128 128 174 154 114 156 107 11 168 122 10
39855-198 155 10 184 144 12 197 138 11 200 144 11
39856-206 145 10 206 145 10 197 138 11 188 164 115
39857-195 195 195 198 198 198 174 174 174 14 14 14
39858- 2 2 6 22 22 22 116 116 116 116 116 116
39859- 22 22 22 2 2 6 74 74 74 70 70 70
39860- 30 30 30 10 10 10 0 0 0 0 0 0
39861- 0 0 0 0 0 0 0 0 0 0 0 0
39862- 0 0 0 0 0 0 0 0 0 0 0 0
39863- 0 0 0 0 0 0 0 0 0 0 0 0
39864- 0 0 0 0 0 0 0 0 0 0 0 0
39865- 0 0 0 0 0 0 0 0 0 0 0 0
39866- 0 0 0 0 0 0 0 0 0 0 0 0
39867- 0 0 0 0 0 0 0 0 0 0 0 0
39868- 0 0 0 0 0 0 0 0 0 0 0 0
39869- 0 0 0 0 0 0 0 0 0 0 0 0
39870- 0 0 0 0 0 0 0 0 0 0 0 0
39871- 0 0 0 0 0 0 0 0 0 0 0 0
39872- 0 0 0 0 0 0 6 6 6 18 18 18
39873- 50 50 50 101 101 101 26 26 26 10 10 10
39874-138 138 138 190 190 190 174 154 114 156 107 11
39875-197 138 11 200 144 11 197 138 11 192 133 9
39876-180 123 7 190 142 34 190 178 144 187 187 187
39877-202 202 202 221 221 221 214 214 214 66 66 66
39878- 2 2 6 2 2 6 50 50 50 62 62 62
39879- 6 6 6 2 2 6 10 10 10 90 90 90
39880- 50 50 50 18 18 18 6 6 6 0 0 0
39881- 0 0 0 0 0 0 0 0 0 0 0 0
39882- 0 0 0 0 0 0 0 0 0 0 0 0
39883- 0 0 0 0 0 0 0 0 0 0 0 0
39884- 0 0 0 0 0 0 0 0 0 0 0 0
39885- 0 0 0 0 0 0 0 0 0 0 0 0
39886- 0 0 0 0 0 0 0 0 0 0 0 0
39887- 0 0 0 0 0 0 0 0 0 0 0 0
39888- 0 0 0 0 0 0 0 0 0 0 0 0
39889- 0 0 0 0 0 0 0 0 0 0 0 0
39890- 0 0 0 0 0 0 0 0 0 0 0 0
39891- 0 0 0 0 0 0 0 0 0 0 0 0
39892- 0 0 0 0 0 0 10 10 10 34 34 34
39893- 74 74 74 74 74 74 2 2 6 6 6 6
39894-144 144 144 198 198 198 190 190 190 178 166 146
39895-154 121 60 156 107 11 156 107 11 168 124 44
39896-174 154 114 187 187 187 190 190 190 210 210 210
39897-246 246 246 253 253 253 253 253 253 182 182 182
39898- 6 6 6 2 2 6 2 2 6 2 2 6
39899- 2 2 6 2 2 6 2 2 6 62 62 62
39900- 74 74 74 34 34 34 14 14 14 0 0 0
39901- 0 0 0 0 0 0 0 0 0 0 0 0
39902- 0 0 0 0 0 0 0 0 0 0 0 0
39903- 0 0 0 0 0 0 0 0 0 0 0 0
39904- 0 0 0 0 0 0 0 0 0 0 0 0
39905- 0 0 0 0 0 0 0 0 0 0 0 0
39906- 0 0 0 0 0 0 0 0 0 0 0 0
39907- 0 0 0 0 0 0 0 0 0 0 0 0
39908- 0 0 0 0 0 0 0 0 0 0 0 0
39909- 0 0 0 0 0 0 0 0 0 0 0 0
39910- 0 0 0 0 0 0 0 0 0 0 0 0
39911- 0 0 0 0 0 0 0 0 0 0 0 0
39912- 0 0 0 10 10 10 22 22 22 54 54 54
39913- 94 94 94 18 18 18 2 2 6 46 46 46
39914-234 234 234 221 221 221 190 190 190 190 190 190
39915-190 190 190 187 187 187 187 187 187 190 190 190
39916-190 190 190 195 195 195 214 214 214 242 242 242
39917-253 253 253 253 253 253 253 253 253 253 253 253
39918- 82 82 82 2 2 6 2 2 6 2 2 6
39919- 2 2 6 2 2 6 2 2 6 14 14 14
39920- 86 86 86 54 54 54 22 22 22 6 6 6
39921- 0 0 0 0 0 0 0 0 0 0 0 0
39922- 0 0 0 0 0 0 0 0 0 0 0 0
39923- 0 0 0 0 0 0 0 0 0 0 0 0
39924- 0 0 0 0 0 0 0 0 0 0 0 0
39925- 0 0 0 0 0 0 0 0 0 0 0 0
39926- 0 0 0 0 0 0 0 0 0 0 0 0
39927- 0 0 0 0 0 0 0 0 0 0 0 0
39928- 0 0 0 0 0 0 0 0 0 0 0 0
39929- 0 0 0 0 0 0 0 0 0 0 0 0
39930- 0 0 0 0 0 0 0 0 0 0 0 0
39931- 0 0 0 0 0 0 0 0 0 0 0 0
39932- 6 6 6 18 18 18 46 46 46 90 90 90
39933- 46 46 46 18 18 18 6 6 6 182 182 182
39934-253 253 253 246 246 246 206 206 206 190 190 190
39935-190 190 190 190 190 190 190 190 190 190 190 190
39936-206 206 206 231 231 231 250 250 250 253 253 253
39937-253 253 253 253 253 253 253 253 253 253 253 253
39938-202 202 202 14 14 14 2 2 6 2 2 6
39939- 2 2 6 2 2 6 2 2 6 2 2 6
39940- 42 42 42 86 86 86 42 42 42 18 18 18
39941- 6 6 6 0 0 0 0 0 0 0 0 0
39942- 0 0 0 0 0 0 0 0 0 0 0 0
39943- 0 0 0 0 0 0 0 0 0 0 0 0
39944- 0 0 0 0 0 0 0 0 0 0 0 0
39945- 0 0 0 0 0 0 0 0 0 0 0 0
39946- 0 0 0 0 0 0 0 0 0 0 0 0
39947- 0 0 0 0 0 0 0 0 0 0 0 0
39948- 0 0 0 0 0 0 0 0 0 0 0 0
39949- 0 0 0 0 0 0 0 0 0 0 0 0
39950- 0 0 0 0 0 0 0 0 0 0 0 0
39951- 0 0 0 0 0 0 0 0 0 6 6 6
39952- 14 14 14 38 38 38 74 74 74 66 66 66
39953- 2 2 6 6 6 6 90 90 90 250 250 250
39954-253 253 253 253 253 253 238 238 238 198 198 198
39955-190 190 190 190 190 190 195 195 195 221 221 221
39956-246 246 246 253 253 253 253 253 253 253 253 253
39957-253 253 253 253 253 253 253 253 253 253 253 253
39958-253 253 253 82 82 82 2 2 6 2 2 6
39959- 2 2 6 2 2 6 2 2 6 2 2 6
39960- 2 2 6 78 78 78 70 70 70 34 34 34
39961- 14 14 14 6 6 6 0 0 0 0 0 0
39962- 0 0 0 0 0 0 0 0 0 0 0 0
39963- 0 0 0 0 0 0 0 0 0 0 0 0
39964- 0 0 0 0 0 0 0 0 0 0 0 0
39965- 0 0 0 0 0 0 0 0 0 0 0 0
39966- 0 0 0 0 0 0 0 0 0 0 0 0
39967- 0 0 0 0 0 0 0 0 0 0 0 0
39968- 0 0 0 0 0 0 0 0 0 0 0 0
39969- 0 0 0 0 0 0 0 0 0 0 0 0
39970- 0 0 0 0 0 0 0 0 0 0 0 0
39971- 0 0 0 0 0 0 0 0 0 14 14 14
39972- 34 34 34 66 66 66 78 78 78 6 6 6
39973- 2 2 6 18 18 18 218 218 218 253 253 253
39974-253 253 253 253 253 253 253 253 253 246 246 246
39975-226 226 226 231 231 231 246 246 246 253 253 253
39976-253 253 253 253 253 253 253 253 253 253 253 253
39977-253 253 253 253 253 253 253 253 253 253 253 253
39978-253 253 253 178 178 178 2 2 6 2 2 6
39979- 2 2 6 2 2 6 2 2 6 2 2 6
39980- 2 2 6 18 18 18 90 90 90 62 62 62
39981- 30 30 30 10 10 10 0 0 0 0 0 0
39982- 0 0 0 0 0 0 0 0 0 0 0 0
39983- 0 0 0 0 0 0 0 0 0 0 0 0
39984- 0 0 0 0 0 0 0 0 0 0 0 0
39985- 0 0 0 0 0 0 0 0 0 0 0 0
39986- 0 0 0 0 0 0 0 0 0 0 0 0
39987- 0 0 0 0 0 0 0 0 0 0 0 0
39988- 0 0 0 0 0 0 0 0 0 0 0 0
39989- 0 0 0 0 0 0 0 0 0 0 0 0
39990- 0 0 0 0 0 0 0 0 0 0 0 0
39991- 0 0 0 0 0 0 10 10 10 26 26 26
39992- 58 58 58 90 90 90 18 18 18 2 2 6
39993- 2 2 6 110 110 110 253 253 253 253 253 253
39994-253 253 253 253 253 253 253 253 253 253 253 253
39995-250 250 250 253 253 253 253 253 253 253 253 253
39996-253 253 253 253 253 253 253 253 253 253 253 253
39997-253 253 253 253 253 253 253 253 253 253 253 253
39998-253 253 253 231 231 231 18 18 18 2 2 6
39999- 2 2 6 2 2 6 2 2 6 2 2 6
40000- 2 2 6 2 2 6 18 18 18 94 94 94
40001- 54 54 54 26 26 26 10 10 10 0 0 0
40002- 0 0 0 0 0 0 0 0 0 0 0 0
40003- 0 0 0 0 0 0 0 0 0 0 0 0
40004- 0 0 0 0 0 0 0 0 0 0 0 0
40005- 0 0 0 0 0 0 0 0 0 0 0 0
40006- 0 0 0 0 0 0 0 0 0 0 0 0
40007- 0 0 0 0 0 0 0 0 0 0 0 0
40008- 0 0 0 0 0 0 0 0 0 0 0 0
40009- 0 0 0 0 0 0 0 0 0 0 0 0
40010- 0 0 0 0 0 0 0 0 0 0 0 0
40011- 0 0 0 6 6 6 22 22 22 50 50 50
40012- 90 90 90 26 26 26 2 2 6 2 2 6
40013- 14 14 14 195 195 195 250 250 250 253 253 253
40014-253 253 253 253 253 253 253 253 253 253 253 253
40015-253 253 253 253 253 253 253 253 253 253 253 253
40016-253 253 253 253 253 253 253 253 253 253 253 253
40017-253 253 253 253 253 253 253 253 253 253 253 253
40018-250 250 250 242 242 242 54 54 54 2 2 6
40019- 2 2 6 2 2 6 2 2 6 2 2 6
40020- 2 2 6 2 2 6 2 2 6 38 38 38
40021- 86 86 86 50 50 50 22 22 22 6 6 6
40022- 0 0 0 0 0 0 0 0 0 0 0 0
40023- 0 0 0 0 0 0 0 0 0 0 0 0
40024- 0 0 0 0 0 0 0 0 0 0 0 0
40025- 0 0 0 0 0 0 0 0 0 0 0 0
40026- 0 0 0 0 0 0 0 0 0 0 0 0
40027- 0 0 0 0 0 0 0 0 0 0 0 0
40028- 0 0 0 0 0 0 0 0 0 0 0 0
40029- 0 0 0 0 0 0 0 0 0 0 0 0
40030- 0 0 0 0 0 0 0 0 0 0 0 0
40031- 6 6 6 14 14 14 38 38 38 82 82 82
40032- 34 34 34 2 2 6 2 2 6 2 2 6
40033- 42 42 42 195 195 195 246 246 246 253 253 253
40034-253 253 253 253 253 253 253 253 253 250 250 250
40035-242 242 242 242 242 242 250 250 250 253 253 253
40036-253 253 253 253 253 253 253 253 253 253 253 253
40037-253 253 253 250 250 250 246 246 246 238 238 238
40038-226 226 226 231 231 231 101 101 101 6 6 6
40039- 2 2 6 2 2 6 2 2 6 2 2 6
40040- 2 2 6 2 2 6 2 2 6 2 2 6
40041- 38 38 38 82 82 82 42 42 42 14 14 14
40042- 6 6 6 0 0 0 0 0 0 0 0 0
40043- 0 0 0 0 0 0 0 0 0 0 0 0
40044- 0 0 0 0 0 0 0 0 0 0 0 0
40045- 0 0 0 0 0 0 0 0 0 0 0 0
40046- 0 0 0 0 0 0 0 0 0 0 0 0
40047- 0 0 0 0 0 0 0 0 0 0 0 0
40048- 0 0 0 0 0 0 0 0 0 0 0 0
40049- 0 0 0 0 0 0 0 0 0 0 0 0
40050- 0 0 0 0 0 0 0 0 0 0 0 0
40051- 10 10 10 26 26 26 62 62 62 66 66 66
40052- 2 2 6 2 2 6 2 2 6 6 6 6
40053- 70 70 70 170 170 170 206 206 206 234 234 234
40054-246 246 246 250 250 250 250 250 250 238 238 238
40055-226 226 226 231 231 231 238 238 238 250 250 250
40056-250 250 250 250 250 250 246 246 246 231 231 231
40057-214 214 214 206 206 206 202 202 202 202 202 202
40058-198 198 198 202 202 202 182 182 182 18 18 18
40059- 2 2 6 2 2 6 2 2 6 2 2 6
40060- 2 2 6 2 2 6 2 2 6 2 2 6
40061- 2 2 6 62 62 62 66 66 66 30 30 30
40062- 10 10 10 0 0 0 0 0 0 0 0 0
40063- 0 0 0 0 0 0 0 0 0 0 0 0
40064- 0 0 0 0 0 0 0 0 0 0 0 0
40065- 0 0 0 0 0 0 0 0 0 0 0 0
40066- 0 0 0 0 0 0 0 0 0 0 0 0
40067- 0 0 0 0 0 0 0 0 0 0 0 0
40068- 0 0 0 0 0 0 0 0 0 0 0 0
40069- 0 0 0 0 0 0 0 0 0 0 0 0
40070- 0 0 0 0 0 0 0 0 0 0 0 0
40071- 14 14 14 42 42 42 82 82 82 18 18 18
40072- 2 2 6 2 2 6 2 2 6 10 10 10
40073- 94 94 94 182 182 182 218 218 218 242 242 242
40074-250 250 250 253 253 253 253 253 253 250 250 250
40075-234 234 234 253 253 253 253 253 253 253 253 253
40076-253 253 253 253 253 253 253 253 253 246 246 246
40077-238 238 238 226 226 226 210 210 210 202 202 202
40078-195 195 195 195 195 195 210 210 210 158 158 158
40079- 6 6 6 14 14 14 50 50 50 14 14 14
40080- 2 2 6 2 2 6 2 2 6 2 2 6
40081- 2 2 6 6 6 6 86 86 86 46 46 46
40082- 18 18 18 6 6 6 0 0 0 0 0 0
40083- 0 0 0 0 0 0 0 0 0 0 0 0
40084- 0 0 0 0 0 0 0 0 0 0 0 0
40085- 0 0 0 0 0 0 0 0 0 0 0 0
40086- 0 0 0 0 0 0 0 0 0 0 0 0
40087- 0 0 0 0 0 0 0 0 0 0 0 0
40088- 0 0 0 0 0 0 0 0 0 0 0 0
40089- 0 0 0 0 0 0 0 0 0 0 0 0
40090- 0 0 0 0 0 0 0 0 0 6 6 6
40091- 22 22 22 54 54 54 70 70 70 2 2 6
40092- 2 2 6 10 10 10 2 2 6 22 22 22
40093-166 166 166 231 231 231 250 250 250 253 253 253
40094-253 253 253 253 253 253 253 253 253 250 250 250
40095-242 242 242 253 253 253 253 253 253 253 253 253
40096-253 253 253 253 253 253 253 253 253 253 253 253
40097-253 253 253 253 253 253 253 253 253 246 246 246
40098-231 231 231 206 206 206 198 198 198 226 226 226
40099- 94 94 94 2 2 6 6 6 6 38 38 38
40100- 30 30 30 2 2 6 2 2 6 2 2 6
40101- 2 2 6 2 2 6 62 62 62 66 66 66
40102- 26 26 26 10 10 10 0 0 0 0 0 0
40103- 0 0 0 0 0 0 0 0 0 0 0 0
40104- 0 0 0 0 0 0 0 0 0 0 0 0
40105- 0 0 0 0 0 0 0 0 0 0 0 0
40106- 0 0 0 0 0 0 0 0 0 0 0 0
40107- 0 0 0 0 0 0 0 0 0 0 0 0
40108- 0 0 0 0 0 0 0 0 0 0 0 0
40109- 0 0 0 0 0 0 0 0 0 0 0 0
40110- 0 0 0 0 0 0 0 0 0 10 10 10
40111- 30 30 30 74 74 74 50 50 50 2 2 6
40112- 26 26 26 26 26 26 2 2 6 106 106 106
40113-238 238 238 253 253 253 253 253 253 253 253 253
40114-253 253 253 253 253 253 253 253 253 253 253 253
40115-253 253 253 253 253 253 253 253 253 253 253 253
40116-253 253 253 253 253 253 253 253 253 253 253 253
40117-253 253 253 253 253 253 253 253 253 253 253 253
40118-253 253 253 246 246 246 218 218 218 202 202 202
40119-210 210 210 14 14 14 2 2 6 2 2 6
40120- 30 30 30 22 22 22 2 2 6 2 2 6
40121- 2 2 6 2 2 6 18 18 18 86 86 86
40122- 42 42 42 14 14 14 0 0 0 0 0 0
40123- 0 0 0 0 0 0 0 0 0 0 0 0
40124- 0 0 0 0 0 0 0 0 0 0 0 0
40125- 0 0 0 0 0 0 0 0 0 0 0 0
40126- 0 0 0 0 0 0 0 0 0 0 0 0
40127- 0 0 0 0 0 0 0 0 0 0 0 0
40128- 0 0 0 0 0 0 0 0 0 0 0 0
40129- 0 0 0 0 0 0 0 0 0 0 0 0
40130- 0 0 0 0 0 0 0 0 0 14 14 14
40131- 42 42 42 90 90 90 22 22 22 2 2 6
40132- 42 42 42 2 2 6 18 18 18 218 218 218
40133-253 253 253 253 253 253 253 253 253 253 253 253
40134-253 253 253 253 253 253 253 253 253 253 253 253
40135-253 253 253 253 253 253 253 253 253 253 253 253
40136-253 253 253 253 253 253 253 253 253 253 253 253
40137-253 253 253 253 253 253 253 253 253 253 253 253
40138-253 253 253 253 253 253 250 250 250 221 221 221
40139-218 218 218 101 101 101 2 2 6 14 14 14
40140- 18 18 18 38 38 38 10 10 10 2 2 6
40141- 2 2 6 2 2 6 2 2 6 78 78 78
40142- 58 58 58 22 22 22 6 6 6 0 0 0
40143- 0 0 0 0 0 0 0 0 0 0 0 0
40144- 0 0 0 0 0 0 0 0 0 0 0 0
40145- 0 0 0 0 0 0 0 0 0 0 0 0
40146- 0 0 0 0 0 0 0 0 0 0 0 0
40147- 0 0 0 0 0 0 0 0 0 0 0 0
40148- 0 0 0 0 0 0 0 0 0 0 0 0
40149- 0 0 0 0 0 0 0 0 0 0 0 0
40150- 0 0 0 0 0 0 6 6 6 18 18 18
40151- 54 54 54 82 82 82 2 2 6 26 26 26
40152- 22 22 22 2 2 6 123 123 123 253 253 253
40153-253 253 253 253 253 253 253 253 253 253 253 253
40154-253 253 253 253 253 253 253 253 253 253 253 253
40155-253 253 253 253 253 253 253 253 253 253 253 253
40156-253 253 253 253 253 253 253 253 253 253 253 253
40157-253 253 253 253 253 253 253 253 253 253 253 253
40158-253 253 253 253 253 253 253 253 253 250 250 250
40159-238 238 238 198 198 198 6 6 6 38 38 38
40160- 58 58 58 26 26 26 38 38 38 2 2 6
40161- 2 2 6 2 2 6 2 2 6 46 46 46
40162- 78 78 78 30 30 30 10 10 10 0 0 0
40163- 0 0 0 0 0 0 0 0 0 0 0 0
40164- 0 0 0 0 0 0 0 0 0 0 0 0
40165- 0 0 0 0 0 0 0 0 0 0 0 0
40166- 0 0 0 0 0 0 0 0 0 0 0 0
40167- 0 0 0 0 0 0 0 0 0 0 0 0
40168- 0 0 0 0 0 0 0 0 0 0 0 0
40169- 0 0 0 0 0 0 0 0 0 0 0 0
40170- 0 0 0 0 0 0 10 10 10 30 30 30
40171- 74 74 74 58 58 58 2 2 6 42 42 42
40172- 2 2 6 22 22 22 231 231 231 253 253 253
40173-253 253 253 253 253 253 253 253 253 253 253 253
40174-253 253 253 253 253 253 253 253 253 250 250 250
40175-253 253 253 253 253 253 253 253 253 253 253 253
40176-253 253 253 253 253 253 253 253 253 253 253 253
40177-253 253 253 253 253 253 253 253 253 253 253 253
40178-253 253 253 253 253 253 253 253 253 253 253 253
40179-253 253 253 246 246 246 46 46 46 38 38 38
40180- 42 42 42 14 14 14 38 38 38 14 14 14
40181- 2 2 6 2 2 6 2 2 6 6 6 6
40182- 86 86 86 46 46 46 14 14 14 0 0 0
40183- 0 0 0 0 0 0 0 0 0 0 0 0
40184- 0 0 0 0 0 0 0 0 0 0 0 0
40185- 0 0 0 0 0 0 0 0 0 0 0 0
40186- 0 0 0 0 0 0 0 0 0 0 0 0
40187- 0 0 0 0 0 0 0 0 0 0 0 0
40188- 0 0 0 0 0 0 0 0 0 0 0 0
40189- 0 0 0 0 0 0 0 0 0 0 0 0
40190- 0 0 0 6 6 6 14 14 14 42 42 42
40191- 90 90 90 18 18 18 18 18 18 26 26 26
40192- 2 2 6 116 116 116 253 253 253 253 253 253
40193-253 253 253 253 253 253 253 253 253 253 253 253
40194-253 253 253 253 253 253 250 250 250 238 238 238
40195-253 253 253 253 253 253 253 253 253 253 253 253
40196-253 253 253 253 253 253 253 253 253 253 253 253
40197-253 253 253 253 253 253 253 253 253 253 253 253
40198-253 253 253 253 253 253 253 253 253 253 253 253
40199-253 253 253 253 253 253 94 94 94 6 6 6
40200- 2 2 6 2 2 6 10 10 10 34 34 34
40201- 2 2 6 2 2 6 2 2 6 2 2 6
40202- 74 74 74 58 58 58 22 22 22 6 6 6
40203- 0 0 0 0 0 0 0 0 0 0 0 0
40204- 0 0 0 0 0 0 0 0 0 0 0 0
40205- 0 0 0 0 0 0 0 0 0 0 0 0
40206- 0 0 0 0 0 0 0 0 0 0 0 0
40207- 0 0 0 0 0 0 0 0 0 0 0 0
40208- 0 0 0 0 0 0 0 0 0 0 0 0
40209- 0 0 0 0 0 0 0 0 0 0 0 0
40210- 0 0 0 10 10 10 26 26 26 66 66 66
40211- 82 82 82 2 2 6 38 38 38 6 6 6
40212- 14 14 14 210 210 210 253 253 253 253 253 253
40213-253 253 253 253 253 253 253 253 253 253 253 253
40214-253 253 253 253 253 253 246 246 246 242 242 242
40215-253 253 253 253 253 253 253 253 253 253 253 253
40216-253 253 253 253 253 253 253 253 253 253 253 253
40217-253 253 253 253 253 253 253 253 253 253 253 253
40218-253 253 253 253 253 253 253 253 253 253 253 253
40219-253 253 253 253 253 253 144 144 144 2 2 6
40220- 2 2 6 2 2 6 2 2 6 46 46 46
40221- 2 2 6 2 2 6 2 2 6 2 2 6
40222- 42 42 42 74 74 74 30 30 30 10 10 10
40223- 0 0 0 0 0 0 0 0 0 0 0 0
40224- 0 0 0 0 0 0 0 0 0 0 0 0
40225- 0 0 0 0 0 0 0 0 0 0 0 0
40226- 0 0 0 0 0 0 0 0 0 0 0 0
40227- 0 0 0 0 0 0 0 0 0 0 0 0
40228- 0 0 0 0 0 0 0 0 0 0 0 0
40229- 0 0 0 0 0 0 0 0 0 0 0 0
40230- 6 6 6 14 14 14 42 42 42 90 90 90
40231- 26 26 26 6 6 6 42 42 42 2 2 6
40232- 74 74 74 250 250 250 253 253 253 253 253 253
40233-253 253 253 253 253 253 253 253 253 253 253 253
40234-253 253 253 253 253 253 242 242 242 242 242 242
40235-253 253 253 253 253 253 253 253 253 253 253 253
40236-253 253 253 253 253 253 253 253 253 253 253 253
40237-253 253 253 253 253 253 253 253 253 253 253 253
40238-253 253 253 253 253 253 253 253 253 253 253 253
40239-253 253 253 253 253 253 182 182 182 2 2 6
40240- 2 2 6 2 2 6 2 2 6 46 46 46
40241- 2 2 6 2 2 6 2 2 6 2 2 6
40242- 10 10 10 86 86 86 38 38 38 10 10 10
40243- 0 0 0 0 0 0 0 0 0 0 0 0
40244- 0 0 0 0 0 0 0 0 0 0 0 0
40245- 0 0 0 0 0 0 0 0 0 0 0 0
40246- 0 0 0 0 0 0 0 0 0 0 0 0
40247- 0 0 0 0 0 0 0 0 0 0 0 0
40248- 0 0 0 0 0 0 0 0 0 0 0 0
40249- 0 0 0 0 0 0 0 0 0 0 0 0
40250- 10 10 10 26 26 26 66 66 66 82 82 82
40251- 2 2 6 22 22 22 18 18 18 2 2 6
40252-149 149 149 253 253 253 253 253 253 253 253 253
40253-253 253 253 253 253 253 253 253 253 253 253 253
40254-253 253 253 253 253 253 234 234 234 242 242 242
40255-253 253 253 253 253 253 253 253 253 253 253 253
40256-253 253 253 253 253 253 253 253 253 253 253 253
40257-253 253 253 253 253 253 253 253 253 253 253 253
40258-253 253 253 253 253 253 253 253 253 253 253 253
40259-253 253 253 253 253 253 206 206 206 2 2 6
40260- 2 2 6 2 2 6 2 2 6 38 38 38
40261- 2 2 6 2 2 6 2 2 6 2 2 6
40262- 6 6 6 86 86 86 46 46 46 14 14 14
40263- 0 0 0 0 0 0 0 0 0 0 0 0
40264- 0 0 0 0 0 0 0 0 0 0 0 0
40265- 0 0 0 0 0 0 0 0 0 0 0 0
40266- 0 0 0 0 0 0 0 0 0 0 0 0
40267- 0 0 0 0 0 0 0 0 0 0 0 0
40268- 0 0 0 0 0 0 0 0 0 0 0 0
40269- 0 0 0 0 0 0 0 0 0 6 6 6
40270- 18 18 18 46 46 46 86 86 86 18 18 18
40271- 2 2 6 34 34 34 10 10 10 6 6 6
40272-210 210 210 253 253 253 253 253 253 253 253 253
40273-253 253 253 253 253 253 253 253 253 253 253 253
40274-253 253 253 253 253 253 234 234 234 242 242 242
40275-253 253 253 253 253 253 253 253 253 253 253 253
40276-253 253 253 253 253 253 253 253 253 253 253 253
40277-253 253 253 253 253 253 253 253 253 253 253 253
40278-253 253 253 253 253 253 253 253 253 253 253 253
40279-253 253 253 253 253 253 221 221 221 6 6 6
40280- 2 2 6 2 2 6 6 6 6 30 30 30
40281- 2 2 6 2 2 6 2 2 6 2 2 6
40282- 2 2 6 82 82 82 54 54 54 18 18 18
40283- 6 6 6 0 0 0 0 0 0 0 0 0
40284- 0 0 0 0 0 0 0 0 0 0 0 0
40285- 0 0 0 0 0 0 0 0 0 0 0 0
40286- 0 0 0 0 0 0 0 0 0 0 0 0
40287- 0 0 0 0 0 0 0 0 0 0 0 0
40288- 0 0 0 0 0 0 0 0 0 0 0 0
40289- 0 0 0 0 0 0 0 0 0 10 10 10
40290- 26 26 26 66 66 66 62 62 62 2 2 6
40291- 2 2 6 38 38 38 10 10 10 26 26 26
40292-238 238 238 253 253 253 253 253 253 253 253 253
40293-253 253 253 253 253 253 253 253 253 253 253 253
40294-253 253 253 253 253 253 231 231 231 238 238 238
40295-253 253 253 253 253 253 253 253 253 253 253 253
40296-253 253 253 253 253 253 253 253 253 253 253 253
40297-253 253 253 253 253 253 253 253 253 253 253 253
40298-253 253 253 253 253 253 253 253 253 253 253 253
40299-253 253 253 253 253 253 231 231 231 6 6 6
40300- 2 2 6 2 2 6 10 10 10 30 30 30
40301- 2 2 6 2 2 6 2 2 6 2 2 6
40302- 2 2 6 66 66 66 58 58 58 22 22 22
40303- 6 6 6 0 0 0 0 0 0 0 0 0
40304- 0 0 0 0 0 0 0 0 0 0 0 0
40305- 0 0 0 0 0 0 0 0 0 0 0 0
40306- 0 0 0 0 0 0 0 0 0 0 0 0
40307- 0 0 0 0 0 0 0 0 0 0 0 0
40308- 0 0 0 0 0 0 0 0 0 0 0 0
40309- 0 0 0 0 0 0 0 0 0 10 10 10
40310- 38 38 38 78 78 78 6 6 6 2 2 6
40311- 2 2 6 46 46 46 14 14 14 42 42 42
40312-246 246 246 253 253 253 253 253 253 253 253 253
40313-253 253 253 253 253 253 253 253 253 253 253 253
40314-253 253 253 253 253 253 231 231 231 242 242 242
40315-253 253 253 253 253 253 253 253 253 253 253 253
40316-253 253 253 253 253 253 253 253 253 253 253 253
40317-253 253 253 253 253 253 253 253 253 253 253 253
40318-253 253 253 253 253 253 253 253 253 253 253 253
40319-253 253 253 253 253 253 234 234 234 10 10 10
40320- 2 2 6 2 2 6 22 22 22 14 14 14
40321- 2 2 6 2 2 6 2 2 6 2 2 6
40322- 2 2 6 66 66 66 62 62 62 22 22 22
40323- 6 6 6 0 0 0 0 0 0 0 0 0
40324- 0 0 0 0 0 0 0 0 0 0 0 0
40325- 0 0 0 0 0 0 0 0 0 0 0 0
40326- 0 0 0 0 0 0 0 0 0 0 0 0
40327- 0 0 0 0 0 0 0 0 0 0 0 0
40328- 0 0 0 0 0 0 0 0 0 0 0 0
40329- 0 0 0 0 0 0 6 6 6 18 18 18
40330- 50 50 50 74 74 74 2 2 6 2 2 6
40331- 14 14 14 70 70 70 34 34 34 62 62 62
40332-250 250 250 253 253 253 253 253 253 253 253 253
40333-253 253 253 253 253 253 253 253 253 253 253 253
40334-253 253 253 253 253 253 231 231 231 246 246 246
40335-253 253 253 253 253 253 253 253 253 253 253 253
40336-253 253 253 253 253 253 253 253 253 253 253 253
40337-253 253 253 253 253 253 253 253 253 253 253 253
40338-253 253 253 253 253 253 253 253 253 253 253 253
40339-253 253 253 253 253 253 234 234 234 14 14 14
40340- 2 2 6 2 2 6 30 30 30 2 2 6
40341- 2 2 6 2 2 6 2 2 6 2 2 6
40342- 2 2 6 66 66 66 62 62 62 22 22 22
40343- 6 6 6 0 0 0 0 0 0 0 0 0
40344- 0 0 0 0 0 0 0 0 0 0 0 0
40345- 0 0 0 0 0 0 0 0 0 0 0 0
40346- 0 0 0 0 0 0 0 0 0 0 0 0
40347- 0 0 0 0 0 0 0 0 0 0 0 0
40348- 0 0 0 0 0 0 0 0 0 0 0 0
40349- 0 0 0 0 0 0 6 6 6 18 18 18
40350- 54 54 54 62 62 62 2 2 6 2 2 6
40351- 2 2 6 30 30 30 46 46 46 70 70 70
40352-250 250 250 253 253 253 253 253 253 253 253 253
40353-253 253 253 253 253 253 253 253 253 253 253 253
40354-253 253 253 253 253 253 231 231 231 246 246 246
40355-253 253 253 253 253 253 253 253 253 253 253 253
40356-253 253 253 253 253 253 253 253 253 253 253 253
40357-253 253 253 253 253 253 253 253 253 253 253 253
40358-253 253 253 253 253 253 253 253 253 253 253 253
40359-253 253 253 253 253 253 226 226 226 10 10 10
40360- 2 2 6 6 6 6 30 30 30 2 2 6
40361- 2 2 6 2 2 6 2 2 6 2 2 6
40362- 2 2 6 66 66 66 58 58 58 22 22 22
40363- 6 6 6 0 0 0 0 0 0 0 0 0
40364- 0 0 0 0 0 0 0 0 0 0 0 0
40365- 0 0 0 0 0 0 0 0 0 0 0 0
40366- 0 0 0 0 0 0 0 0 0 0 0 0
40367- 0 0 0 0 0 0 0 0 0 0 0 0
40368- 0 0 0 0 0 0 0 0 0 0 0 0
40369- 0 0 0 0 0 0 6 6 6 22 22 22
40370- 58 58 58 62 62 62 2 2 6 2 2 6
40371- 2 2 6 2 2 6 30 30 30 78 78 78
40372-250 250 250 253 253 253 253 253 253 253 253 253
40373-253 253 253 253 253 253 253 253 253 253 253 253
40374-253 253 253 253 253 253 231 231 231 246 246 246
40375-253 253 253 253 253 253 253 253 253 253 253 253
40376-253 253 253 253 253 253 253 253 253 253 253 253
40377-253 253 253 253 253 253 253 253 253 253 253 253
40378-253 253 253 253 253 253 253 253 253 253 253 253
40379-253 253 253 253 253 253 206 206 206 2 2 6
40380- 22 22 22 34 34 34 18 14 6 22 22 22
40381- 26 26 26 18 18 18 6 6 6 2 2 6
40382- 2 2 6 82 82 82 54 54 54 18 18 18
40383- 6 6 6 0 0 0 0 0 0 0 0 0
40384- 0 0 0 0 0 0 0 0 0 0 0 0
40385- 0 0 0 0 0 0 0 0 0 0 0 0
40386- 0 0 0 0 0 0 0 0 0 0 0 0
40387- 0 0 0 0 0 0 0 0 0 0 0 0
40388- 0 0 0 0 0 0 0 0 0 0 0 0
40389- 0 0 0 0 0 0 6 6 6 26 26 26
40390- 62 62 62 106 106 106 74 54 14 185 133 11
40391-210 162 10 121 92 8 6 6 6 62 62 62
40392-238 238 238 253 253 253 253 253 253 253 253 253
40393-253 253 253 253 253 253 253 253 253 253 253 253
40394-253 253 253 253 253 253 231 231 231 246 246 246
40395-253 253 253 253 253 253 253 253 253 253 253 253
40396-253 253 253 253 253 253 253 253 253 253 253 253
40397-253 253 253 253 253 253 253 253 253 253 253 253
40398-253 253 253 253 253 253 253 253 253 253 253 253
40399-253 253 253 253 253 253 158 158 158 18 18 18
40400- 14 14 14 2 2 6 2 2 6 2 2 6
40401- 6 6 6 18 18 18 66 66 66 38 38 38
40402- 6 6 6 94 94 94 50 50 50 18 18 18
40403- 6 6 6 0 0 0 0 0 0 0 0 0
40404- 0 0 0 0 0 0 0 0 0 0 0 0
40405- 0 0 0 0 0 0 0 0 0 0 0 0
40406- 0 0 0 0 0 0 0 0 0 0 0 0
40407- 0 0 0 0 0 0 0 0 0 0 0 0
40408- 0 0 0 0 0 0 0 0 0 6 6 6
40409- 10 10 10 10 10 10 18 18 18 38 38 38
40410- 78 78 78 142 134 106 216 158 10 242 186 14
40411-246 190 14 246 190 14 156 118 10 10 10 10
40412- 90 90 90 238 238 238 253 253 253 253 253 253
40413-253 253 253 253 253 253 253 253 253 253 253 253
40414-253 253 253 253 253 253 231 231 231 250 250 250
40415-253 253 253 253 253 253 253 253 253 253 253 253
40416-253 253 253 253 253 253 253 253 253 253 253 253
40417-253 253 253 253 253 253 253 253 253 253 253 253
40418-253 253 253 253 253 253 253 253 253 246 230 190
40419-238 204 91 238 204 91 181 142 44 37 26 9
40420- 2 2 6 2 2 6 2 2 6 2 2 6
40421- 2 2 6 2 2 6 38 38 38 46 46 46
40422- 26 26 26 106 106 106 54 54 54 18 18 18
40423- 6 6 6 0 0 0 0 0 0 0 0 0
40424- 0 0 0 0 0 0 0 0 0 0 0 0
40425- 0 0 0 0 0 0 0 0 0 0 0 0
40426- 0 0 0 0 0 0 0 0 0 0 0 0
40427- 0 0 0 0 0 0 0 0 0 0 0 0
40428- 0 0 0 6 6 6 14 14 14 22 22 22
40429- 30 30 30 38 38 38 50 50 50 70 70 70
40430-106 106 106 190 142 34 226 170 11 242 186 14
40431-246 190 14 246 190 14 246 190 14 154 114 10
40432- 6 6 6 74 74 74 226 226 226 253 253 253
40433-253 253 253 253 253 253 253 253 253 253 253 253
40434-253 253 253 253 253 253 231 231 231 250 250 250
40435-253 253 253 253 253 253 253 253 253 253 253 253
40436-253 253 253 253 253 253 253 253 253 253 253 253
40437-253 253 253 253 253 253 253 253 253 253 253 253
40438-253 253 253 253 253 253 253 253 253 228 184 62
40439-241 196 14 241 208 19 232 195 16 38 30 10
40440- 2 2 6 2 2 6 2 2 6 2 2 6
40441- 2 2 6 6 6 6 30 30 30 26 26 26
40442-203 166 17 154 142 90 66 66 66 26 26 26
40443- 6 6 6 0 0 0 0 0 0 0 0 0
40444- 0 0 0 0 0 0 0 0 0 0 0 0
40445- 0 0 0 0 0 0 0 0 0 0 0 0
40446- 0 0 0 0 0 0 0 0 0 0 0 0
40447- 0 0 0 0 0 0 0 0 0 0 0 0
40448- 6 6 6 18 18 18 38 38 38 58 58 58
40449- 78 78 78 86 86 86 101 101 101 123 123 123
40450-175 146 61 210 150 10 234 174 13 246 186 14
40451-246 190 14 246 190 14 246 190 14 238 190 10
40452-102 78 10 2 2 6 46 46 46 198 198 198
40453-253 253 253 253 253 253 253 253 253 253 253 253
40454-253 253 253 253 253 253 234 234 234 242 242 242
40455-253 253 253 253 253 253 253 253 253 253 253 253
40456-253 253 253 253 253 253 253 253 253 253 253 253
40457-253 253 253 253 253 253 253 253 253 253 253 253
40458-253 253 253 253 253 253 253 253 253 224 178 62
40459-242 186 14 241 196 14 210 166 10 22 18 6
40460- 2 2 6 2 2 6 2 2 6 2 2 6
40461- 2 2 6 2 2 6 6 6 6 121 92 8
40462-238 202 15 232 195 16 82 82 82 34 34 34
40463- 10 10 10 0 0 0 0 0 0 0 0 0
40464- 0 0 0 0 0 0 0 0 0 0 0 0
40465- 0 0 0 0 0 0 0 0 0 0 0 0
40466- 0 0 0 0 0 0 0 0 0 0 0 0
40467- 0 0 0 0 0 0 0 0 0 0 0 0
40468- 14 14 14 38 38 38 70 70 70 154 122 46
40469-190 142 34 200 144 11 197 138 11 197 138 11
40470-213 154 11 226 170 11 242 186 14 246 190 14
40471-246 190 14 246 190 14 246 190 14 246 190 14
40472-225 175 15 46 32 6 2 2 6 22 22 22
40473-158 158 158 250 250 250 253 253 253 253 253 253
40474-253 253 253 253 253 253 253 253 253 253 253 253
40475-253 253 253 253 253 253 253 253 253 253 253 253
40476-253 253 253 253 253 253 253 253 253 253 253 253
40477-253 253 253 253 253 253 253 253 253 253 253 253
40478-253 253 253 250 250 250 242 242 242 224 178 62
40479-239 182 13 236 186 11 213 154 11 46 32 6
40480- 2 2 6 2 2 6 2 2 6 2 2 6
40481- 2 2 6 2 2 6 61 42 6 225 175 15
40482-238 190 10 236 186 11 112 100 78 42 42 42
40483- 14 14 14 0 0 0 0 0 0 0 0 0
40484- 0 0 0 0 0 0 0 0 0 0 0 0
40485- 0 0 0 0 0 0 0 0 0 0 0 0
40486- 0 0 0 0 0 0 0 0 0 0 0 0
40487- 0 0 0 0 0 0 0 0 0 6 6 6
40488- 22 22 22 54 54 54 154 122 46 213 154 11
40489-226 170 11 230 174 11 226 170 11 226 170 11
40490-236 178 12 242 186 14 246 190 14 246 190 14
40491-246 190 14 246 190 14 246 190 14 246 190 14
40492-241 196 14 184 144 12 10 10 10 2 2 6
40493- 6 6 6 116 116 116 242 242 242 253 253 253
40494-253 253 253 253 253 253 253 253 253 253 253 253
40495-253 253 253 253 253 253 253 253 253 253 253 253
40496-253 253 253 253 253 253 253 253 253 253 253 253
40497-253 253 253 253 253 253 253 253 253 253 253 253
40498-253 253 253 231 231 231 198 198 198 214 170 54
40499-236 178 12 236 178 12 210 150 10 137 92 6
40500- 18 14 6 2 2 6 2 2 6 2 2 6
40501- 6 6 6 70 47 6 200 144 11 236 178 12
40502-239 182 13 239 182 13 124 112 88 58 58 58
40503- 22 22 22 6 6 6 0 0 0 0 0 0
40504- 0 0 0 0 0 0 0 0 0 0 0 0
40505- 0 0 0 0 0 0 0 0 0 0 0 0
40506- 0 0 0 0 0 0 0 0 0 0 0 0
40507- 0 0 0 0 0 0 0 0 0 10 10 10
40508- 30 30 30 70 70 70 180 133 36 226 170 11
40509-239 182 13 242 186 14 242 186 14 246 186 14
40510-246 190 14 246 190 14 246 190 14 246 190 14
40511-246 190 14 246 190 14 246 190 14 246 190 14
40512-246 190 14 232 195 16 98 70 6 2 2 6
40513- 2 2 6 2 2 6 66 66 66 221 221 221
40514-253 253 253 253 253 253 253 253 253 253 253 253
40515-253 253 253 253 253 253 253 253 253 253 253 253
40516-253 253 253 253 253 253 253 253 253 253 253 253
40517-253 253 253 253 253 253 253 253 253 253 253 253
40518-253 253 253 206 206 206 198 198 198 214 166 58
40519-230 174 11 230 174 11 216 158 10 192 133 9
40520-163 110 8 116 81 8 102 78 10 116 81 8
40521-167 114 7 197 138 11 226 170 11 239 182 13
40522-242 186 14 242 186 14 162 146 94 78 78 78
40523- 34 34 34 14 14 14 6 6 6 0 0 0
40524- 0 0 0 0 0 0 0 0 0 0 0 0
40525- 0 0 0 0 0 0 0 0 0 0 0 0
40526- 0 0 0 0 0 0 0 0 0 0 0 0
40527- 0 0 0 0 0 0 0 0 0 6 6 6
40528- 30 30 30 78 78 78 190 142 34 226 170 11
40529-239 182 13 246 190 14 246 190 14 246 190 14
40530-246 190 14 246 190 14 246 190 14 246 190 14
40531-246 190 14 246 190 14 246 190 14 246 190 14
40532-246 190 14 241 196 14 203 166 17 22 18 6
40533- 2 2 6 2 2 6 2 2 6 38 38 38
40534-218 218 218 253 253 253 253 253 253 253 253 253
40535-253 253 253 253 253 253 253 253 253 253 253 253
40536-253 253 253 253 253 253 253 253 253 253 253 253
40537-253 253 253 253 253 253 253 253 253 253 253 253
40538-250 250 250 206 206 206 198 198 198 202 162 69
40539-226 170 11 236 178 12 224 166 10 210 150 10
40540-200 144 11 197 138 11 192 133 9 197 138 11
40541-210 150 10 226 170 11 242 186 14 246 190 14
40542-246 190 14 246 186 14 225 175 15 124 112 88
40543- 62 62 62 30 30 30 14 14 14 6 6 6
40544- 0 0 0 0 0 0 0 0 0 0 0 0
40545- 0 0 0 0 0 0 0 0 0 0 0 0
40546- 0 0 0 0 0 0 0 0 0 0 0 0
40547- 0 0 0 0 0 0 0 0 0 10 10 10
40548- 30 30 30 78 78 78 174 135 50 224 166 10
40549-239 182 13 246 190 14 246 190 14 246 190 14
40550-246 190 14 246 190 14 246 190 14 246 190 14
40551-246 190 14 246 190 14 246 190 14 246 190 14
40552-246 190 14 246 190 14 241 196 14 139 102 15
40553- 2 2 6 2 2 6 2 2 6 2 2 6
40554- 78 78 78 250 250 250 253 253 253 253 253 253
40555-253 253 253 253 253 253 253 253 253 253 253 253
40556-253 253 253 253 253 253 253 253 253 253 253 253
40557-253 253 253 253 253 253 253 253 253 253 253 253
40558-250 250 250 214 214 214 198 198 198 190 150 46
40559-219 162 10 236 178 12 234 174 13 224 166 10
40560-216 158 10 213 154 11 213 154 11 216 158 10
40561-226 170 11 239 182 13 246 190 14 246 190 14
40562-246 190 14 246 190 14 242 186 14 206 162 42
40563-101 101 101 58 58 58 30 30 30 14 14 14
40564- 6 6 6 0 0 0 0 0 0 0 0 0
40565- 0 0 0 0 0 0 0 0 0 0 0 0
40566- 0 0 0 0 0 0 0 0 0 0 0 0
40567- 0 0 0 0 0 0 0 0 0 10 10 10
40568- 30 30 30 74 74 74 174 135 50 216 158 10
40569-236 178 12 246 190 14 246 190 14 246 190 14
40570-246 190 14 246 190 14 246 190 14 246 190 14
40571-246 190 14 246 190 14 246 190 14 246 190 14
40572-246 190 14 246 190 14 241 196 14 226 184 13
40573- 61 42 6 2 2 6 2 2 6 2 2 6
40574- 22 22 22 238 238 238 253 253 253 253 253 253
40575-253 253 253 253 253 253 253 253 253 253 253 253
40576-253 253 253 253 253 253 253 253 253 253 253 253
40577-253 253 253 253 253 253 253 253 253 253 253 253
40578-253 253 253 226 226 226 187 187 187 180 133 36
40579-216 158 10 236 178 12 239 182 13 236 178 12
40580-230 174 11 226 170 11 226 170 11 230 174 11
40581-236 178 12 242 186 14 246 190 14 246 190 14
40582-246 190 14 246 190 14 246 186 14 239 182 13
40583-206 162 42 106 106 106 66 66 66 34 34 34
40584- 14 14 14 6 6 6 0 0 0 0 0 0
40585- 0 0 0 0 0 0 0 0 0 0 0 0
40586- 0 0 0 0 0 0 0 0 0 0 0 0
40587- 0 0 0 0 0 0 0 0 0 6 6 6
40588- 26 26 26 70 70 70 163 133 67 213 154 11
40589-236 178 12 246 190 14 246 190 14 246 190 14
40590-246 190 14 246 190 14 246 190 14 246 190 14
40591-246 190 14 246 190 14 246 190 14 246 190 14
40592-246 190 14 246 190 14 246 190 14 241 196 14
40593-190 146 13 18 14 6 2 2 6 2 2 6
40594- 46 46 46 246 246 246 253 253 253 253 253 253
40595-253 253 253 253 253 253 253 253 253 253 253 253
40596-253 253 253 253 253 253 253 253 253 253 253 253
40597-253 253 253 253 253 253 253 253 253 253 253 253
40598-253 253 253 221 221 221 86 86 86 156 107 11
40599-216 158 10 236 178 12 242 186 14 246 186 14
40600-242 186 14 239 182 13 239 182 13 242 186 14
40601-242 186 14 246 186 14 246 190 14 246 190 14
40602-246 190 14 246 190 14 246 190 14 246 190 14
40603-242 186 14 225 175 15 142 122 72 66 66 66
40604- 30 30 30 10 10 10 0 0 0 0 0 0
40605- 0 0 0 0 0 0 0 0 0 0 0 0
40606- 0 0 0 0 0 0 0 0 0 0 0 0
40607- 0 0 0 0 0 0 0 0 0 6 6 6
40608- 26 26 26 70 70 70 163 133 67 210 150 10
40609-236 178 12 246 190 14 246 190 14 246 190 14
40610-246 190 14 246 190 14 246 190 14 246 190 14
40611-246 190 14 246 190 14 246 190 14 246 190 14
40612-246 190 14 246 190 14 246 190 14 246 190 14
40613-232 195 16 121 92 8 34 34 34 106 106 106
40614-221 221 221 253 253 253 253 253 253 253 253 253
40615-253 253 253 253 253 253 253 253 253 253 253 253
40616-253 253 253 253 253 253 253 253 253 253 253 253
40617-253 253 253 253 253 253 253 253 253 253 253 253
40618-242 242 242 82 82 82 18 14 6 163 110 8
40619-216 158 10 236 178 12 242 186 14 246 190 14
40620-246 190 14 246 190 14 246 190 14 246 190 14
40621-246 190 14 246 190 14 246 190 14 246 190 14
40622-246 190 14 246 190 14 246 190 14 246 190 14
40623-246 190 14 246 190 14 242 186 14 163 133 67
40624- 46 46 46 18 18 18 6 6 6 0 0 0
40625- 0 0 0 0 0 0 0 0 0 0 0 0
40626- 0 0 0 0 0 0 0 0 0 0 0 0
40627- 0 0 0 0 0 0 0 0 0 10 10 10
40628- 30 30 30 78 78 78 163 133 67 210 150 10
40629-236 178 12 246 186 14 246 190 14 246 190 14
40630-246 190 14 246 190 14 246 190 14 246 190 14
40631-246 190 14 246 190 14 246 190 14 246 190 14
40632-246 190 14 246 190 14 246 190 14 246 190 14
40633-241 196 14 215 174 15 190 178 144 253 253 253
40634-253 253 253 253 253 253 253 253 253 253 253 253
40635-253 253 253 253 253 253 253 253 253 253 253 253
40636-253 253 253 253 253 253 253 253 253 253 253 253
40637-253 253 253 253 253 253 253 253 253 218 218 218
40638- 58 58 58 2 2 6 22 18 6 167 114 7
40639-216 158 10 236 178 12 246 186 14 246 190 14
40640-246 190 14 246 190 14 246 190 14 246 190 14
40641-246 190 14 246 190 14 246 190 14 246 190 14
40642-246 190 14 246 190 14 246 190 14 246 190 14
40643-246 190 14 246 186 14 242 186 14 190 150 46
40644- 54 54 54 22 22 22 6 6 6 0 0 0
40645- 0 0 0 0 0 0 0 0 0 0 0 0
40646- 0 0 0 0 0 0 0 0 0 0 0 0
40647- 0 0 0 0 0 0 0 0 0 14 14 14
40648- 38 38 38 86 86 86 180 133 36 213 154 11
40649-236 178 12 246 186 14 246 190 14 246 190 14
40650-246 190 14 246 190 14 246 190 14 246 190 14
40651-246 190 14 246 190 14 246 190 14 246 190 14
40652-246 190 14 246 190 14 246 190 14 246 190 14
40653-246 190 14 232 195 16 190 146 13 214 214 214
40654-253 253 253 253 253 253 253 253 253 253 253 253
40655-253 253 253 253 253 253 253 253 253 253 253 253
40656-253 253 253 253 253 253 253 253 253 253 253 253
40657-253 253 253 250 250 250 170 170 170 26 26 26
40658- 2 2 6 2 2 6 37 26 9 163 110 8
40659-219 162 10 239 182 13 246 186 14 246 190 14
40660-246 190 14 246 190 14 246 190 14 246 190 14
40661-246 190 14 246 190 14 246 190 14 246 190 14
40662-246 190 14 246 190 14 246 190 14 246 190 14
40663-246 186 14 236 178 12 224 166 10 142 122 72
40664- 46 46 46 18 18 18 6 6 6 0 0 0
40665- 0 0 0 0 0 0 0 0 0 0 0 0
40666- 0 0 0 0 0 0 0 0 0 0 0 0
40667- 0 0 0 0 0 0 6 6 6 18 18 18
40668- 50 50 50 109 106 95 192 133 9 224 166 10
40669-242 186 14 246 190 14 246 190 14 246 190 14
40670-246 190 14 246 190 14 246 190 14 246 190 14
40671-246 190 14 246 190 14 246 190 14 246 190 14
40672-246 190 14 246 190 14 246 190 14 246 190 14
40673-242 186 14 226 184 13 210 162 10 142 110 46
40674-226 226 226 253 253 253 253 253 253 253 253 253
40675-253 253 253 253 253 253 253 253 253 253 253 253
40676-253 253 253 253 253 253 253 253 253 253 253 253
40677-198 198 198 66 66 66 2 2 6 2 2 6
40678- 2 2 6 2 2 6 50 34 6 156 107 11
40679-219 162 10 239 182 13 246 186 14 246 190 14
40680-246 190 14 246 190 14 246 190 14 246 190 14
40681-246 190 14 246 190 14 246 190 14 246 190 14
40682-246 190 14 246 190 14 246 190 14 242 186 14
40683-234 174 13 213 154 11 154 122 46 66 66 66
40684- 30 30 30 10 10 10 0 0 0 0 0 0
40685- 0 0 0 0 0 0 0 0 0 0 0 0
40686- 0 0 0 0 0 0 0 0 0 0 0 0
40687- 0 0 0 0 0 0 6 6 6 22 22 22
40688- 58 58 58 154 121 60 206 145 10 234 174 13
40689-242 186 14 246 186 14 246 190 14 246 190 14
40690-246 190 14 246 190 14 246 190 14 246 190 14
40691-246 190 14 246 190 14 246 190 14 246 190 14
40692-246 190 14 246 190 14 246 190 14 246 190 14
40693-246 186 14 236 178 12 210 162 10 163 110 8
40694- 61 42 6 138 138 138 218 218 218 250 250 250
40695-253 253 253 253 253 253 253 253 253 250 250 250
40696-242 242 242 210 210 210 144 144 144 66 66 66
40697- 6 6 6 2 2 6 2 2 6 2 2 6
40698- 2 2 6 2 2 6 61 42 6 163 110 8
40699-216 158 10 236 178 12 246 190 14 246 190 14
40700-246 190 14 246 190 14 246 190 14 246 190 14
40701-246 190 14 246 190 14 246 190 14 246 190 14
40702-246 190 14 239 182 13 230 174 11 216 158 10
40703-190 142 34 124 112 88 70 70 70 38 38 38
40704- 18 18 18 6 6 6 0 0 0 0 0 0
40705- 0 0 0 0 0 0 0 0 0 0 0 0
40706- 0 0 0 0 0 0 0 0 0 0 0 0
40707- 0 0 0 0 0 0 6 6 6 22 22 22
40708- 62 62 62 168 124 44 206 145 10 224 166 10
40709-236 178 12 239 182 13 242 186 14 242 186 14
40710-246 186 14 246 190 14 246 190 14 246 190 14
40711-246 190 14 246 190 14 246 190 14 246 190 14
40712-246 190 14 246 190 14 246 190 14 246 190 14
40713-246 190 14 236 178 12 216 158 10 175 118 6
40714- 80 54 7 2 2 6 6 6 6 30 30 30
40715- 54 54 54 62 62 62 50 50 50 38 38 38
40716- 14 14 14 2 2 6 2 2 6 2 2 6
40717- 2 2 6 2 2 6 2 2 6 2 2 6
40718- 2 2 6 6 6 6 80 54 7 167 114 7
40719-213 154 11 236 178 12 246 190 14 246 190 14
40720-246 190 14 246 190 14 246 190 14 246 190 14
40721-246 190 14 242 186 14 239 182 13 239 182 13
40722-230 174 11 210 150 10 174 135 50 124 112 88
40723- 82 82 82 54 54 54 34 34 34 18 18 18
40724- 6 6 6 0 0 0 0 0 0 0 0 0
40725- 0 0 0 0 0 0 0 0 0 0 0 0
40726- 0 0 0 0 0 0 0 0 0 0 0 0
40727- 0 0 0 0 0 0 6 6 6 18 18 18
40728- 50 50 50 158 118 36 192 133 9 200 144 11
40729-216 158 10 219 162 10 224 166 10 226 170 11
40730-230 174 11 236 178 12 239 182 13 239 182 13
40731-242 186 14 246 186 14 246 190 14 246 190 14
40732-246 190 14 246 190 14 246 190 14 246 190 14
40733-246 186 14 230 174 11 210 150 10 163 110 8
40734-104 69 6 10 10 10 2 2 6 2 2 6
40735- 2 2 6 2 2 6 2 2 6 2 2 6
40736- 2 2 6 2 2 6 2 2 6 2 2 6
40737- 2 2 6 2 2 6 2 2 6 2 2 6
40738- 2 2 6 6 6 6 91 60 6 167 114 7
40739-206 145 10 230 174 11 242 186 14 246 190 14
40740-246 190 14 246 190 14 246 186 14 242 186 14
40741-239 182 13 230 174 11 224 166 10 213 154 11
40742-180 133 36 124 112 88 86 86 86 58 58 58
40743- 38 38 38 22 22 22 10 10 10 6 6 6
40744- 0 0 0 0 0 0 0 0 0 0 0 0
40745- 0 0 0 0 0 0 0 0 0 0 0 0
40746- 0 0 0 0 0 0 0 0 0 0 0 0
40747- 0 0 0 0 0 0 0 0 0 14 14 14
40748- 34 34 34 70 70 70 138 110 50 158 118 36
40749-167 114 7 180 123 7 192 133 9 197 138 11
40750-200 144 11 206 145 10 213 154 11 219 162 10
40751-224 166 10 230 174 11 239 182 13 242 186 14
40752-246 186 14 246 186 14 246 186 14 246 186 14
40753-239 182 13 216 158 10 185 133 11 152 99 6
40754-104 69 6 18 14 6 2 2 6 2 2 6
40755- 2 2 6 2 2 6 2 2 6 2 2 6
40756- 2 2 6 2 2 6 2 2 6 2 2 6
40757- 2 2 6 2 2 6 2 2 6 2 2 6
40758- 2 2 6 6 6 6 80 54 7 152 99 6
40759-192 133 9 219 162 10 236 178 12 239 182 13
40760-246 186 14 242 186 14 239 182 13 236 178 12
40761-224 166 10 206 145 10 192 133 9 154 121 60
40762- 94 94 94 62 62 62 42 42 42 22 22 22
40763- 14 14 14 6 6 6 0 0 0 0 0 0
40764- 0 0 0 0 0 0 0 0 0 0 0 0
40765- 0 0 0 0 0 0 0 0 0 0 0 0
40766- 0 0 0 0 0 0 0 0 0 0 0 0
40767- 0 0 0 0 0 0 0 0 0 6 6 6
40768- 18 18 18 34 34 34 58 58 58 78 78 78
40769-101 98 89 124 112 88 142 110 46 156 107 11
40770-163 110 8 167 114 7 175 118 6 180 123 7
40771-185 133 11 197 138 11 210 150 10 219 162 10
40772-226 170 11 236 178 12 236 178 12 234 174 13
40773-219 162 10 197 138 11 163 110 8 130 83 6
40774- 91 60 6 10 10 10 2 2 6 2 2 6
40775- 18 18 18 38 38 38 38 38 38 38 38 38
40776- 38 38 38 38 38 38 38 38 38 38 38 38
40777- 38 38 38 38 38 38 26 26 26 2 2 6
40778- 2 2 6 6 6 6 70 47 6 137 92 6
40779-175 118 6 200 144 11 219 162 10 230 174 11
40780-234 174 13 230 174 11 219 162 10 210 150 10
40781-192 133 9 163 110 8 124 112 88 82 82 82
40782- 50 50 50 30 30 30 14 14 14 6 6 6
40783- 0 0 0 0 0 0 0 0 0 0 0 0
40784- 0 0 0 0 0 0 0 0 0 0 0 0
40785- 0 0 0 0 0 0 0 0 0 0 0 0
40786- 0 0 0 0 0 0 0 0 0 0 0 0
40787- 0 0 0 0 0 0 0 0 0 0 0 0
40788- 6 6 6 14 14 14 22 22 22 34 34 34
40789- 42 42 42 58 58 58 74 74 74 86 86 86
40790-101 98 89 122 102 70 130 98 46 121 87 25
40791-137 92 6 152 99 6 163 110 8 180 123 7
40792-185 133 11 197 138 11 206 145 10 200 144 11
40793-180 123 7 156 107 11 130 83 6 104 69 6
40794- 50 34 6 54 54 54 110 110 110 101 98 89
40795- 86 86 86 82 82 82 78 78 78 78 78 78
40796- 78 78 78 78 78 78 78 78 78 78 78 78
40797- 78 78 78 82 82 82 86 86 86 94 94 94
40798-106 106 106 101 101 101 86 66 34 124 80 6
40799-156 107 11 180 123 7 192 133 9 200 144 11
40800-206 145 10 200 144 11 192 133 9 175 118 6
40801-139 102 15 109 106 95 70 70 70 42 42 42
40802- 22 22 22 10 10 10 0 0 0 0 0 0
40803- 0 0 0 0 0 0 0 0 0 0 0 0
40804- 0 0 0 0 0 0 0 0 0 0 0 0
40805- 0 0 0 0 0 0 0 0 0 0 0 0
40806- 0 0 0 0 0 0 0 0 0 0 0 0
40807- 0 0 0 0 0 0 0 0 0 0 0 0
40808- 0 0 0 0 0 0 6 6 6 10 10 10
40809- 14 14 14 22 22 22 30 30 30 38 38 38
40810- 50 50 50 62 62 62 74 74 74 90 90 90
40811-101 98 89 112 100 78 121 87 25 124 80 6
40812-137 92 6 152 99 6 152 99 6 152 99 6
40813-138 86 6 124 80 6 98 70 6 86 66 30
40814-101 98 89 82 82 82 58 58 58 46 46 46
40815- 38 38 38 34 34 34 34 34 34 34 34 34
40816- 34 34 34 34 34 34 34 34 34 34 34 34
40817- 34 34 34 34 34 34 38 38 38 42 42 42
40818- 54 54 54 82 82 82 94 86 76 91 60 6
40819-134 86 6 156 107 11 167 114 7 175 118 6
40820-175 118 6 167 114 7 152 99 6 121 87 25
40821-101 98 89 62 62 62 34 34 34 18 18 18
40822- 6 6 6 0 0 0 0 0 0 0 0 0
40823- 0 0 0 0 0 0 0 0 0 0 0 0
40824- 0 0 0 0 0 0 0 0 0 0 0 0
40825- 0 0 0 0 0 0 0 0 0 0 0 0
40826- 0 0 0 0 0 0 0 0 0 0 0 0
40827- 0 0 0 0 0 0 0 0 0 0 0 0
40828- 0 0 0 0 0 0 0 0 0 0 0 0
40829- 0 0 0 6 6 6 6 6 6 10 10 10
40830- 18 18 18 22 22 22 30 30 30 42 42 42
40831- 50 50 50 66 66 66 86 86 86 101 98 89
40832-106 86 58 98 70 6 104 69 6 104 69 6
40833-104 69 6 91 60 6 82 62 34 90 90 90
40834- 62 62 62 38 38 38 22 22 22 14 14 14
40835- 10 10 10 10 10 10 10 10 10 10 10 10
40836- 10 10 10 10 10 10 6 6 6 10 10 10
40837- 10 10 10 10 10 10 10 10 10 14 14 14
40838- 22 22 22 42 42 42 70 70 70 89 81 66
40839- 80 54 7 104 69 6 124 80 6 137 92 6
40840-134 86 6 116 81 8 100 82 52 86 86 86
40841- 58 58 58 30 30 30 14 14 14 6 6 6
40842- 0 0 0 0 0 0 0 0 0 0 0 0
40843- 0 0 0 0 0 0 0 0 0 0 0 0
40844- 0 0 0 0 0 0 0 0 0 0 0 0
40845- 0 0 0 0 0 0 0 0 0 0 0 0
40846- 0 0 0 0 0 0 0 0 0 0 0 0
40847- 0 0 0 0 0 0 0 0 0 0 0 0
40848- 0 0 0 0 0 0 0 0 0 0 0 0
40849- 0 0 0 0 0 0 0 0 0 0 0 0
40850- 0 0 0 6 6 6 10 10 10 14 14 14
40851- 18 18 18 26 26 26 38 38 38 54 54 54
40852- 70 70 70 86 86 86 94 86 76 89 81 66
40853- 89 81 66 86 86 86 74 74 74 50 50 50
40854- 30 30 30 14 14 14 6 6 6 0 0 0
40855- 0 0 0 0 0 0 0 0 0 0 0 0
40856- 0 0 0 0 0 0 0 0 0 0 0 0
40857- 0 0 0 0 0 0 0 0 0 0 0 0
40858- 6 6 6 18 18 18 34 34 34 58 58 58
40859- 82 82 82 89 81 66 89 81 66 89 81 66
40860- 94 86 66 94 86 76 74 74 74 50 50 50
40861- 26 26 26 14 14 14 6 6 6 0 0 0
40862- 0 0 0 0 0 0 0 0 0 0 0 0
40863- 0 0 0 0 0 0 0 0 0 0 0 0
40864- 0 0 0 0 0 0 0 0 0 0 0 0
40865- 0 0 0 0 0 0 0 0 0 0 0 0
40866- 0 0 0 0 0 0 0 0 0 0 0 0
40867- 0 0 0 0 0 0 0 0 0 0 0 0
40868- 0 0 0 0 0 0 0 0 0 0 0 0
40869- 0 0 0 0 0 0 0 0 0 0 0 0
40870- 0 0 0 0 0 0 0 0 0 0 0 0
40871- 6 6 6 6 6 6 14 14 14 18 18 18
40872- 30 30 30 38 38 38 46 46 46 54 54 54
40873- 50 50 50 42 42 42 30 30 30 18 18 18
40874- 10 10 10 0 0 0 0 0 0 0 0 0
40875- 0 0 0 0 0 0 0 0 0 0 0 0
40876- 0 0 0 0 0 0 0 0 0 0 0 0
40877- 0 0 0 0 0 0 0 0 0 0 0 0
40878- 0 0 0 6 6 6 14 14 14 26 26 26
40879- 38 38 38 50 50 50 58 58 58 58 58 58
40880- 54 54 54 42 42 42 30 30 30 18 18 18
40881- 10 10 10 0 0 0 0 0 0 0 0 0
40882- 0 0 0 0 0 0 0 0 0 0 0 0
40883- 0 0 0 0 0 0 0 0 0 0 0 0
40884- 0 0 0 0 0 0 0 0 0 0 0 0
40885- 0 0 0 0 0 0 0 0 0 0 0 0
40886- 0 0 0 0 0 0 0 0 0 0 0 0
40887- 0 0 0 0 0 0 0 0 0 0 0 0
40888- 0 0 0 0 0 0 0 0 0 0 0 0
40889- 0 0 0 0 0 0 0 0 0 0 0 0
40890- 0 0 0 0 0 0 0 0 0 0 0 0
40891- 0 0 0 0 0 0 0 0 0 6 6 6
40892- 6 6 6 10 10 10 14 14 14 18 18 18
40893- 18 18 18 14 14 14 10 10 10 6 6 6
40894- 0 0 0 0 0 0 0 0 0 0 0 0
40895- 0 0 0 0 0 0 0 0 0 0 0 0
40896- 0 0 0 0 0 0 0 0 0 0 0 0
40897- 0 0 0 0 0 0 0 0 0 0 0 0
40898- 0 0 0 0 0 0 0 0 0 6 6 6
40899- 14 14 14 18 18 18 22 22 22 22 22 22
40900- 18 18 18 14 14 14 10 10 10 6 6 6
40901- 0 0 0 0 0 0 0 0 0 0 0 0
40902- 0 0 0 0 0 0 0 0 0 0 0 0
40903- 0 0 0 0 0 0 0 0 0 0 0 0
40904- 0 0 0 0 0 0 0 0 0 0 0 0
40905- 0 0 0 0 0 0 0 0 0 0 0 0
40906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40918+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40919+4 4 4 4 4 4
40920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40933+4 4 4 4 4 4
40934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40946+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40947+4 4 4 4 4 4
40948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40961+4 4 4 4 4 4
40962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40975+4 4 4 4 4 4
40976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40989+4 4 4 4 4 4
40990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40994+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40995+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40999+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
41000+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41001+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
41002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41003+4 4 4 4 4 4
41004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41008+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
41009+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
41010+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41013+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
41014+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
41015+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
41016+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41017+4 4 4 4 4 4
41018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41022+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
41023+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
41024+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41027+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
41028+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
41029+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
41030+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
41031+4 4 4 4 4 4
41032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41035+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
41036+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
41037+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
41038+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
41039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41040+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
41041+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
41042+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
41043+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
41044+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
41045+4 4 4 4 4 4
41046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41049+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
41050+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
41051+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
41052+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
41053+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41054+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
41055+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
41056+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
41057+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
41058+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
41059+4 4 4 4 4 4
41060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41063+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
41064+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
41065+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
41066+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
41067+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41068+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
41069+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
41070+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
41071+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
41072+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
41073+4 4 4 4 4 4
41074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41076+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
41077+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
41078+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
41079+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
41080+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
41081+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
41082+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
41083+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
41084+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
41085+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
41086+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
41087+4 4 4 4 4 4
41088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41090+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
41091+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
41092+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
41093+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
41094+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
41095+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
41096+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
41097+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
41098+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
41099+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
41100+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
41101+4 4 4 4 4 4
41102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41104+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
41105+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
41106+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
41107+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
41108+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
41109+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
41110+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
41111+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
41112+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
41113+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
41114+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41115+4 4 4 4 4 4
41116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41118+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
41119+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
41120+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
41121+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
41122+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
41123+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
41124+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
41125+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
41126+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
41127+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
41128+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
41129+4 4 4 4 4 4
41130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41131+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
41132+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
41133+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
41134+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
41135+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
41136+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
41137+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
41138+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
41139+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
41140+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
41141+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
41142+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
41143+4 4 4 4 4 4
41144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41145+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
41146+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
41147+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
41148+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41149+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
41150+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
41151+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
41152+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
41153+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
41154+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
41155+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
41156+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
41157+0 0 0 4 4 4
41158+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
41159+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
41160+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
41161+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
41162+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
41163+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
41164+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
41165+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
41166+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
41167+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
41168+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
41169+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
41170+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
41171+2 0 0 0 0 0
41172+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
41173+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
41174+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
41175+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
41176+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
41177+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
41178+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
41179+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
41180+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
41181+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
41182+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
41183+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
41184+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
41185+37 38 37 0 0 0
41186+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
41187+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
41188+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
41189+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
41190+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
41191+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
41192+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
41193+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
41194+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
41195+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
41196+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
41197+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
41198+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
41199+85 115 134 4 0 0
41200+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
41201+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
41202+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
41203+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
41204+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
41205+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
41206+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
41207+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
41208+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
41209+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
41210+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
41211+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
41212+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
41213+60 73 81 4 0 0
41214+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
41215+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
41216+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
41217+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
41218+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
41219+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
41220+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
41221+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
41222+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
41223+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
41224+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
41225+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
41226+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
41227+16 19 21 4 0 0
41228+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
41229+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
41230+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
41231+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
41232+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
41233+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
41234+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
41235+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
41236+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
41237+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
41238+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
41239+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
41240+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
41241+4 0 0 4 3 3
41242+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
41243+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
41244+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
41245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
41246+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
41247+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
41248+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
41249+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
41250+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
41251+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
41252+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
41253+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
41254+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
41255+3 2 2 4 4 4
41256+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
41257+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
41258+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
41259+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
41260+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
41261+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
41262+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
41263+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
41264+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
41265+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
41266+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
41267+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
41268+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
41269+4 4 4 4 4 4
41270+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
41271+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
41272+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
41273+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
41274+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
41275+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
41276+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
41277+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
41278+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
41279+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
41280+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
41281+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
41282+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
41283+4 4 4 4 4 4
41284+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
41285+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
41286+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
41287+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
41288+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
41289+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
41290+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
41291+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
41292+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
41293+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
41294+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
41295+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
41296+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
41297+5 5 5 5 5 5
41298+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
41299+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
41300+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
41301+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
41302+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
41303+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41304+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
41305+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
41306+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
41307+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
41308+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
41309+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
41310+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41311+5 5 5 4 4 4
41312+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
41313+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
41314+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
41315+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
41316+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41317+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
41318+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
41319+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
41320+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
41321+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
41322+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
41323+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41325+4 4 4 4 4 4
41326+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
41327+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
41328+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
41329+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
41330+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
41331+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41332+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41333+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
41334+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
41335+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
41336+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
41337+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
41338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41339+4 4 4 4 4 4
41340+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
41341+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
41342+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
41343+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
41344+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41345+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
41346+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
41347+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
41348+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
41349+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
41350+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
41351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41353+4 4 4 4 4 4
41354+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
41355+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
41356+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
41357+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
41358+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41359+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41360+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41361+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
41362+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
41363+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
41364+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
41365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41367+4 4 4 4 4 4
41368+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
41369+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
41370+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
41371+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
41372+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41373+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
41374+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
41375+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
41376+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
41377+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
41378+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41381+4 4 4 4 4 4
41382+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
41383+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
41384+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
41385+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
41386+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41387+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
41388+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
41389+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
41390+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
41391+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
41392+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
41393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41395+4 4 4 4 4 4
41396+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
41397+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
41398+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
41399+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
41400+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41401+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
41402+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
41403+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
41404+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
41405+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
41406+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
41407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41409+4 4 4 4 4 4
41410+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
41411+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
41412+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
41413+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
41414+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
41415+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
41416+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
41417+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
41418+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
41419+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
41420+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41423+4 4 4 4 4 4
41424+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
41425+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
41426+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
41427+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
41428+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41429+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
41430+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
41431+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
41432+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
41433+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
41434+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41437+4 4 4 4 4 4
41438+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
41439+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
41440+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
41441+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
41442+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41443+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
41444+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
41445+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
41446+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
41447+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
41448+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41451+4 4 4 4 4 4
41452+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
41453+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
41454+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
41455+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
41456+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41457+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
41458+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
41459+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
41460+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
41461+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41462+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41465+4 4 4 4 4 4
41466+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
41467+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
41468+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
41469+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
41470+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
41471+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
41472+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
41473+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
41474+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41475+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41476+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41479+4 4 4 4 4 4
41480+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
41481+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
41482+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
41483+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
41484+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41485+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
41486+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
41487+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
41488+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41489+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41490+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41493+4 4 4 4 4 4
41494+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
41495+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
41496+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
41497+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
41498+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
41499+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
41500+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
41501+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
41502+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41503+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41504+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41507+4 4 4 4 4 4
41508+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
41509+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
41510+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41511+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
41512+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
41513+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
41514+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
41515+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
41516+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
41517+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41518+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41519+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41521+4 4 4 4 4 4
41522+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41523+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
41524+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
41525+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
41526+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
41527+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
41528+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
41529+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
41530+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41531+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41532+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41533+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41535+4 4 4 4 4 4
41536+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
41537+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
41538+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41539+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
41540+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
41541+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
41542+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
41543+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
41544+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
41545+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41546+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41547+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41548+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41549+4 4 4 4 4 4
41550+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41551+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
41552+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
41553+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
41554+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
41555+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
41556+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
41557+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
41558+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41559+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41560+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41561+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41563+4 4 4 4 4 4
41564+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41565+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
41566+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41567+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
41568+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
41569+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
41570+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
41571+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
41572+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41573+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41574+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41577+4 4 4 4 4 4
41578+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41579+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
41580+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
41581+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
41582+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
41583+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
41584+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41585+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
41586+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41587+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41588+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41591+4 4 4 4 4 4
41592+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41593+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
41594+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
41595+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41596+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
41597+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
41598+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41599+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
41600+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41601+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41602+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41605+4 4 4 4 4 4
41606+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41607+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
41608+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
41609+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
41610+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
41611+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
41612+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
41613+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
41614+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
41615+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41616+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41619+4 4 4 4 4 4
41620+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41621+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
41622+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
41623+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
41624+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
41625+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
41626+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
41627+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
41628+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
41629+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41630+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41633+4 4 4 4 4 4
41634+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
41635+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
41636+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
41637+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
41638+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41639+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
41640+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
41641+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
41642+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
41643+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41644+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41647+4 4 4 4 4 4
41648+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41649+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
41650+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
41651+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
41652+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
41653+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
41654+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
41655+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
41656+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
41657+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41658+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41661+4 4 4 4 4 4
41662+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
41663+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
41664+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
41665+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
41666+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
41667+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
41668+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
41669+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
41670+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
41671+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
41672+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41675+4 4 4 4 4 4
41676+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
41677+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41678+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
41679+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
41680+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
41681+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
41682+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
41683+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
41684+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
41685+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
41686+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41689+4 4 4 4 4 4
41690+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
41691+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41692+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
41693+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
41694+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
41695+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
41696+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41697+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
41698+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
41699+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
41700+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41701+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41703+4 4 4 4 4 4
41704+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
41705+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
41706+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
41707+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
41708+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
41709+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
41710+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
41711+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
41712+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
41713+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
41714+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41717+4 4 4 4 4 4
41718+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
41719+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
41720+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41721+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
41722+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
41723+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
41724+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
41725+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
41726+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
41727+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
41728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41731+4 4 4 4 4 4
41732+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41733+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
41734+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
41735+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
41736+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
41737+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
41738+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
41739+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
41740+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
41741+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41745+4 4 4 4 4 4
41746+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
41747+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
41748+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
41749+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
41750+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
41751+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
41752+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
41753+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
41754+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
41755+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41759+4 4 4 4 4 4
41760+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
41761+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
41762+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
41763+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
41764+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
41765+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
41766+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
41767+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
41768+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41769+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41773+4 4 4 4 4 4
41774+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
41775+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41776+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
41777+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41778+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
41779+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
41780+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
41781+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
41782+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
41783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41787+4 4 4 4 4 4
41788+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
41789+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
41790+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
41791+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
41792+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
41793+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
41794+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
41795+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
41796+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
41797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41801+4 4 4 4 4 4
41802+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41803+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
41804+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
41805+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
41806+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
41807+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
41808+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
41809+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
41810+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41815+4 4 4 4 4 4
41816+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
41817+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
41818+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41819+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
41820+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
41821+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
41822+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
41823+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
41824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41829+4 4 4 4 4 4
41830+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41831+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
41832+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
41833+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
41834+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
41835+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
41836+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
41837+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41843+4 4 4 4 4 4
41844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41845+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
41846+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41847+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
41848+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
41849+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
41850+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
41851+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
41852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41857+4 4 4 4 4 4
41858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41859+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
41860+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
41861+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
41862+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
41863+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
41864+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
41865+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
41866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41871+4 4 4 4 4 4
41872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41873+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41874+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
41875+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41876+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
41877+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
41878+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
41879+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41885+4 4 4 4 4 4
41886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41888+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41889+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
41890+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
41891+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
41892+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
41893+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41899+4 4 4 4 4 4
41900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41903+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41904+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
41905+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
41906+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
41907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41908+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41913+4 4 4 4 4 4
41914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41917+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41918+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41919+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
41920+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
41921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41927+4 4 4 4 4 4
41928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41931+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41932+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41933+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41934+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
41935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41941+4 4 4 4 4 4
41942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41945+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
41946+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
41947+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
41948+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
41949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41955+4 4 4 4 4 4
41956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41960+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
41961+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41962+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41969+4 4 4 4 4 4
41970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41974+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
41975+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
41976+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41983+4 4 4 4 4 4
41984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41988+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
41989+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
41990+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41997+4 4 4 4 4 4
41998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42002+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
42003+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
42004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42011+4 4 4 4 4 4
42012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42016+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
42017+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
42018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
42025+4 4 4 4 4 4
42026diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
42027index 8af6414..658c030 100644
42028--- a/drivers/video/udlfb.c
42029+++ b/drivers/video/udlfb.c
42030@@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
42031 dlfb_urb_completion(urb);
42032
42033 error:
42034- atomic_add(bytes_sent, &dev->bytes_sent);
42035- atomic_add(bytes_identical, &dev->bytes_identical);
42036- atomic_add(width*height*2, &dev->bytes_rendered);
42037+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
42038+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
42039+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
42040 end_cycles = get_cycles();
42041- atomic_add(((unsigned int) ((end_cycles - start_cycles)
42042+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
42043 >> 10)), /* Kcycles */
42044 &dev->cpu_kcycles_used);
42045
42046@@ -745,11 +745,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
42047 dlfb_urb_completion(urb);
42048
42049 error:
42050- atomic_add(bytes_sent, &dev->bytes_sent);
42051- atomic_add(bytes_identical, &dev->bytes_identical);
42052- atomic_add(bytes_rendered, &dev->bytes_rendered);
42053+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
42054+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
42055+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
42056 end_cycles = get_cycles();
42057- atomic_add(((unsigned int) ((end_cycles - start_cycles)
42058+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
42059 >> 10)), /* Kcycles */
42060 &dev->cpu_kcycles_used);
42061 }
42062@@ -1373,7 +1373,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
42063 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42064 struct dlfb_data *dev = fb_info->par;
42065 return snprintf(buf, PAGE_SIZE, "%u\n",
42066- atomic_read(&dev->bytes_rendered));
42067+ atomic_read_unchecked(&dev->bytes_rendered));
42068 }
42069
42070 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
42071@@ -1381,7 +1381,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
42072 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42073 struct dlfb_data *dev = fb_info->par;
42074 return snprintf(buf, PAGE_SIZE, "%u\n",
42075- atomic_read(&dev->bytes_identical));
42076+ atomic_read_unchecked(&dev->bytes_identical));
42077 }
42078
42079 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
42080@@ -1389,7 +1389,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
42081 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42082 struct dlfb_data *dev = fb_info->par;
42083 return snprintf(buf, PAGE_SIZE, "%u\n",
42084- atomic_read(&dev->bytes_sent));
42085+ atomic_read_unchecked(&dev->bytes_sent));
42086 }
42087
42088 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
42089@@ -1397,7 +1397,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
42090 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42091 struct dlfb_data *dev = fb_info->par;
42092 return snprintf(buf, PAGE_SIZE, "%u\n",
42093- atomic_read(&dev->cpu_kcycles_used));
42094+ atomic_read_unchecked(&dev->cpu_kcycles_used));
42095 }
42096
42097 static ssize_t edid_show(
42098@@ -1457,10 +1457,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
42099 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42100 struct dlfb_data *dev = fb_info->par;
42101
42102- atomic_set(&dev->bytes_rendered, 0);
42103- atomic_set(&dev->bytes_identical, 0);
42104- atomic_set(&dev->bytes_sent, 0);
42105- atomic_set(&dev->cpu_kcycles_used, 0);
42106+ atomic_set_unchecked(&dev->bytes_rendered, 0);
42107+ atomic_set_unchecked(&dev->bytes_identical, 0);
42108+ atomic_set_unchecked(&dev->bytes_sent, 0);
42109+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
42110
42111 return count;
42112 }
42113diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
42114index b0e2a42..e2df3ad 100644
42115--- a/drivers/video/uvesafb.c
42116+++ b/drivers/video/uvesafb.c
42117@@ -19,6 +19,7 @@
42118 #include <linux/io.h>
42119 #include <linux/mutex.h>
42120 #include <linux/slab.h>
42121+#include <linux/moduleloader.h>
42122 #include <video/edid.h>
42123 #include <video/uvesafb.h>
42124 #ifdef CONFIG_X86
42125@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
42126 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
42127 par->pmi_setpal = par->ypan = 0;
42128 } else {
42129+
42130+#ifdef CONFIG_PAX_KERNEXEC
42131+#ifdef CONFIG_MODULES
42132+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
42133+#endif
42134+ if (!par->pmi_code) {
42135+ par->pmi_setpal = par->ypan = 0;
42136+ return 0;
42137+ }
42138+#endif
42139+
42140 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
42141 + task->t.regs.edi);
42142+
42143+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42144+ pax_open_kernel();
42145+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
42146+ pax_close_kernel();
42147+
42148+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
42149+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
42150+#else
42151 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
42152 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
42153+#endif
42154+
42155 printk(KERN_INFO "uvesafb: protected mode interface info at "
42156 "%04x:%04x\n",
42157 (u16)task->t.regs.es, (u16)task->t.regs.edi);
42158@@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
42159 par->ypan = ypan;
42160
42161 if (par->pmi_setpal || par->ypan) {
42162+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
42163 if (__supported_pte_mask & _PAGE_NX) {
42164 par->pmi_setpal = par->ypan = 0;
42165 printk(KERN_WARNING "uvesafb: NX protection is actively."
42166 "We have better not to use the PMI.\n");
42167- } else {
42168+ } else
42169+#endif
42170 uvesafb_vbe_getpmi(task, par);
42171- }
42172 }
42173 #else
42174 /* The protected mode interface is not available on non-x86. */
42175@@ -1836,6 +1860,11 @@ out:
42176 if (par->vbe_modes)
42177 kfree(par->vbe_modes);
42178
42179+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42180+ if (par->pmi_code)
42181+ module_free_exec(NULL, par->pmi_code);
42182+#endif
42183+
42184 framebuffer_release(info);
42185 return err;
42186 }
42187@@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
42188 kfree(par->vbe_state_orig);
42189 if (par->vbe_state_saved)
42190 kfree(par->vbe_state_saved);
42191+
42192+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42193+ if (par->pmi_code)
42194+ module_free_exec(NULL, par->pmi_code);
42195+#endif
42196+
42197 }
42198
42199 framebuffer_release(info);
42200diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
42201index 501b340..86bd4cf 100644
42202--- a/drivers/video/vesafb.c
42203+++ b/drivers/video/vesafb.c
42204@@ -9,6 +9,7 @@
42205 */
42206
42207 #include <linux/module.h>
42208+#include <linux/moduleloader.h>
42209 #include <linux/kernel.h>
42210 #include <linux/errno.h>
42211 #include <linux/string.h>
42212@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
42213 static int vram_total __initdata; /* Set total amount of memory */
42214 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
42215 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
42216-static void (*pmi_start)(void) __read_mostly;
42217-static void (*pmi_pal) (void) __read_mostly;
42218+static void (*pmi_start)(void) __read_only;
42219+static void (*pmi_pal) (void) __read_only;
42220 static int depth __read_mostly;
42221 static int vga_compat __read_mostly;
42222 /* --------------------------------------------------------------------- */
42223@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
42224 unsigned int size_vmode;
42225 unsigned int size_remap;
42226 unsigned int size_total;
42227+ void *pmi_code = NULL;
42228
42229 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
42230 return -ENODEV;
42231@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
42232 size_remap = size_total;
42233 vesafb_fix.smem_len = size_remap;
42234
42235-#ifndef __i386__
42236- screen_info.vesapm_seg = 0;
42237-#endif
42238-
42239 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
42240 printk(KERN_WARNING
42241 "vesafb: cannot reserve video memory at 0x%lx\n",
42242@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
42243 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
42244 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
42245
42246+#ifdef __i386__
42247+
42248+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42249+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
42250+ if (!pmi_code)
42251+#elif !defined(CONFIG_PAX_KERNEXEC)
42252+ if (0)
42253+#endif
42254+
42255+#endif
42256+ screen_info.vesapm_seg = 0;
42257+
42258 if (screen_info.vesapm_seg) {
42259- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
42260- screen_info.vesapm_seg,screen_info.vesapm_off);
42261+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
42262+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
42263 }
42264
42265 if (screen_info.vesapm_seg < 0xc000)
42266@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
42267
42268 if (ypan || pmi_setpal) {
42269 unsigned short *pmi_base;
42270+
42271 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
42272- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
42273- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
42274+
42275+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42276+ pax_open_kernel();
42277+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
42278+#else
42279+ pmi_code = pmi_base;
42280+#endif
42281+
42282+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
42283+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
42284+
42285+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42286+ pmi_start = ktva_ktla(pmi_start);
42287+ pmi_pal = ktva_ktla(pmi_pal);
42288+ pax_close_kernel();
42289+#endif
42290+
42291 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
42292 if (pmi_base[3]) {
42293 printk(KERN_INFO "vesafb: pmi: ports = ");
42294@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
42295 info->node, info->fix.id);
42296 return 0;
42297 err:
42298+
42299+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42300+ module_free_exec(NULL, pmi_code);
42301+#endif
42302+
42303 if (info->screen_base)
42304 iounmap(info->screen_base);
42305 framebuffer_release(info);
42306diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
42307index 88714ae..16c2e11 100644
42308--- a/drivers/video/via/via_clock.h
42309+++ b/drivers/video/via/via_clock.h
42310@@ -56,7 +56,7 @@ struct via_clock {
42311
42312 void (*set_engine_pll_state)(u8 state);
42313 void (*set_engine_pll)(struct via_pll_config config);
42314-};
42315+} __no_const;
42316
42317
42318 static inline u32 get_pll_internal_frequency(u32 ref_freq,
42319diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
42320index e56c934..fc22f4b 100644
42321--- a/drivers/xen/xen-pciback/conf_space.h
42322+++ b/drivers/xen/xen-pciback/conf_space.h
42323@@ -44,15 +44,15 @@ struct config_field {
42324 struct {
42325 conf_dword_write write;
42326 conf_dword_read read;
42327- } dw;
42328+ } __no_const dw;
42329 struct {
42330 conf_word_write write;
42331 conf_word_read read;
42332- } w;
42333+ } __no_const w;
42334 struct {
42335 conf_byte_write write;
42336 conf_byte_read read;
42337- } b;
42338+ } __no_const b;
42339 } u;
42340 struct list_head list;
42341 };
42342diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
42343index 57ccb75..f6d05f8 100644
42344--- a/fs/9p/vfs_inode.c
42345+++ b/fs/9p/vfs_inode.c
42346@@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
42347 void
42348 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42349 {
42350- char *s = nd_get_link(nd);
42351+ const char *s = nd_get_link(nd);
42352
42353 p9_debug(P9_DEBUG_VFS, " %s %s\n",
42354 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
42355diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
42356index 0225742..1cd4732 100644
42357--- a/fs/Kconfig.binfmt
42358+++ b/fs/Kconfig.binfmt
42359@@ -89,7 +89,7 @@ config HAVE_AOUT
42360
42361 config BINFMT_AOUT
42362 tristate "Kernel support for a.out and ECOFF binaries"
42363- depends on HAVE_AOUT
42364+ depends on HAVE_AOUT && BROKEN
42365 ---help---
42366 A.out (Assembler.OUTput) is a set of formats for libraries and
42367 executables used in the earliest versions of UNIX. Linux used
42368diff --git a/fs/aio.c b/fs/aio.c
42369index 55c4c76..11aee6f 100644
42370--- a/fs/aio.c
42371+++ b/fs/aio.c
42372@@ -118,7 +118,7 @@ static int aio_setup_ring(struct kioctx *ctx)
42373 size += sizeof(struct io_event) * nr_events;
42374 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
42375
42376- if (nr_pages < 0)
42377+ if (nr_pages <= 0)
42378 return -EINVAL;
42379
42380 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
42381@@ -1440,18 +1440,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
42382 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
42383 {
42384 ssize_t ret;
42385+ struct iovec iovstack;
42386
42387 #ifdef CONFIG_COMPAT
42388 if (compat)
42389 ret = compat_rw_copy_check_uvector(type,
42390 (struct compat_iovec __user *)kiocb->ki_buf,
42391- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
42392+ kiocb->ki_nbytes, 1, &iovstack,
42393 &kiocb->ki_iovec);
42394 else
42395 #endif
42396 ret = rw_copy_check_uvector(type,
42397 (struct iovec __user *)kiocb->ki_buf,
42398- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
42399+ kiocb->ki_nbytes, 1, &iovstack,
42400 &kiocb->ki_iovec);
42401 if (ret < 0)
42402 goto out;
42403@@ -1460,6 +1461,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
42404 if (ret < 0)
42405 goto out;
42406
42407+ if (kiocb->ki_iovec == &iovstack) {
42408+ kiocb->ki_inline_vec = iovstack;
42409+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
42410+ }
42411 kiocb->ki_nr_segs = kiocb->ki_nbytes;
42412 kiocb->ki_cur_seg = 0;
42413 /* ki_nbytes/left now reflect bytes instead of segs */
42414diff --git a/fs/attr.c b/fs/attr.c
42415index 0da9095..1386693 100644
42416--- a/fs/attr.c
42417+++ b/fs/attr.c
42418@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
42419 unsigned long limit;
42420
42421 limit = rlimit(RLIMIT_FSIZE);
42422+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
42423 if (limit != RLIM_INFINITY && offset > limit)
42424 goto out_sig;
42425 if (offset > inode->i_sb->s_maxbytes)
42426diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
42427index da8876d..4456166 100644
42428--- a/fs/autofs4/waitq.c
42429+++ b/fs/autofs4/waitq.c
42430@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
42431 {
42432 unsigned long sigpipe, flags;
42433 mm_segment_t fs;
42434- const char *data = (const char *)addr;
42435+ const char __user *data = (const char __force_user *)addr;
42436 ssize_t wr = 0;
42437
42438 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
42439@@ -348,6 +348,10 @@ static int validate_request(struct autofs_wait_queue **wait,
42440 return 1;
42441 }
42442
42443+#ifdef CONFIG_GRKERNSEC_HIDESYM
42444+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
42445+#endif
42446+
42447 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
42448 enum autofs_notify notify)
42449 {
42450@@ -381,7 +385,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
42451
42452 /* If this is a direct mount request create a dummy name */
42453 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
42454+#ifdef CONFIG_GRKERNSEC_HIDESYM
42455+ /* this name does get written to userland via autofs4_write() */
42456+ qstr.len = sprintf(name, "%08lx", atomic_inc_return_unchecked(&autofs_dummy_name_id));
42457+#else
42458 qstr.len = sprintf(name, "%p", dentry);
42459+#endif
42460 else {
42461 qstr.len = autofs4_getpath(sbi, dentry, &name);
42462 if (!qstr.len) {
42463diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
42464index e18da23..affc30e 100644
42465--- a/fs/befs/linuxvfs.c
42466+++ b/fs/befs/linuxvfs.c
42467@@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42468 {
42469 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
42470 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
42471- char *link = nd_get_link(nd);
42472+ const char *link = nd_get_link(nd);
42473 if (!IS_ERR(link))
42474 kfree(link);
42475 }
42476diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
42477index d146e18..12d1bd1 100644
42478--- a/fs/binfmt_aout.c
42479+++ b/fs/binfmt_aout.c
42480@@ -16,6 +16,7 @@
42481 #include <linux/string.h>
42482 #include <linux/fs.h>
42483 #include <linux/file.h>
42484+#include <linux/security.h>
42485 #include <linux/stat.h>
42486 #include <linux/fcntl.h>
42487 #include <linux/ptrace.h>
42488@@ -83,6 +84,8 @@ static int aout_core_dump(struct coredump_params *cprm)
42489 #endif
42490 # define START_STACK(u) ((void __user *)u.start_stack)
42491
42492+ memset(&dump, 0, sizeof(dump));
42493+
42494 fs = get_fs();
42495 set_fs(KERNEL_DS);
42496 has_dumped = 1;
42497@@ -94,10 +97,12 @@ static int aout_core_dump(struct coredump_params *cprm)
42498
42499 /* If the size of the dump file exceeds the rlimit, then see what would happen
42500 if we wrote the stack, but not the data area. */
42501+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
42502 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
42503 dump.u_dsize = 0;
42504
42505 /* Make sure we have enough room to write the stack and data areas. */
42506+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
42507 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
42508 dump.u_ssize = 0;
42509
42510@@ -231,6 +236,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42511 rlim = rlimit(RLIMIT_DATA);
42512 if (rlim >= RLIM_INFINITY)
42513 rlim = ~0;
42514+
42515+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
42516 if (ex.a_data + ex.a_bss > rlim)
42517 return -ENOMEM;
42518
42519@@ -265,6 +272,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42520
42521 install_exec_creds(bprm);
42522
42523+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42524+ current->mm->pax_flags = 0UL;
42525+#endif
42526+
42527+#ifdef CONFIG_PAX_PAGEEXEC
42528+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
42529+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
42530+
42531+#ifdef CONFIG_PAX_EMUTRAMP
42532+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
42533+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
42534+#endif
42535+
42536+#ifdef CONFIG_PAX_MPROTECT
42537+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
42538+ current->mm->pax_flags |= MF_PAX_MPROTECT;
42539+#endif
42540+
42541+ }
42542+#endif
42543+
42544 if (N_MAGIC(ex) == OMAGIC) {
42545 unsigned long text_addr, map_size;
42546 loff_t pos;
42547@@ -330,7 +358,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42548 }
42549
42550 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
42551- PROT_READ | PROT_WRITE | PROT_EXEC,
42552+ PROT_READ | PROT_WRITE,
42553 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
42554 fd_offset + ex.a_text);
42555 if (error != N_DATADDR(ex)) {
42556diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
42557index 1b52956..271266e 100644
42558--- a/fs/binfmt_elf.c
42559+++ b/fs/binfmt_elf.c
42560@@ -32,6 +32,7 @@
42561 #include <linux/elf.h>
42562 #include <linux/utsname.h>
42563 #include <linux/coredump.h>
42564+#include <linux/xattr.h>
42565 #include <asm/uaccess.h>
42566 #include <asm/param.h>
42567 #include <asm/page.h>
42568@@ -52,6 +53,10 @@ static int elf_core_dump(struct coredump_params *cprm);
42569 #define elf_core_dump NULL
42570 #endif
42571
42572+#ifdef CONFIG_PAX_MPROTECT
42573+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
42574+#endif
42575+
42576 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
42577 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
42578 #else
42579@@ -71,6 +76,11 @@ static struct linux_binfmt elf_format = {
42580 .load_binary = load_elf_binary,
42581 .load_shlib = load_elf_library,
42582 .core_dump = elf_core_dump,
42583+
42584+#ifdef CONFIG_PAX_MPROTECT
42585+ .handle_mprotect= elf_handle_mprotect,
42586+#endif
42587+
42588 .min_coredump = ELF_EXEC_PAGESIZE,
42589 };
42590
42591@@ -78,6 +88,8 @@ static struct linux_binfmt elf_format = {
42592
42593 static int set_brk(unsigned long start, unsigned long end)
42594 {
42595+ unsigned long e = end;
42596+
42597 start = ELF_PAGEALIGN(start);
42598 end = ELF_PAGEALIGN(end);
42599 if (end > start) {
42600@@ -86,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
42601 if (BAD_ADDR(addr))
42602 return addr;
42603 }
42604- current->mm->start_brk = current->mm->brk = end;
42605+ current->mm->start_brk = current->mm->brk = e;
42606 return 0;
42607 }
42608
42609@@ -147,12 +159,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42610 elf_addr_t __user *u_rand_bytes;
42611 const char *k_platform = ELF_PLATFORM;
42612 const char *k_base_platform = ELF_BASE_PLATFORM;
42613- unsigned char k_rand_bytes[16];
42614+ u32 k_rand_bytes[4];
42615 int items;
42616 elf_addr_t *elf_info;
42617 int ei_index = 0;
42618 const struct cred *cred = current_cred();
42619 struct vm_area_struct *vma;
42620+ unsigned long saved_auxv[AT_VECTOR_SIZE];
42621
42622 /*
42623 * In some cases (e.g. Hyper-Threading), we want to avoid L1
42624@@ -194,8 +207,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42625 * Generate 16 random bytes for userspace PRNG seeding.
42626 */
42627 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
42628- u_rand_bytes = (elf_addr_t __user *)
42629- STACK_ALLOC(p, sizeof(k_rand_bytes));
42630+ srandom32(k_rand_bytes[0] ^ random32());
42631+ srandom32(k_rand_bytes[1] ^ random32());
42632+ srandom32(k_rand_bytes[2] ^ random32());
42633+ srandom32(k_rand_bytes[3] ^ random32());
42634+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
42635+ u_rand_bytes = (elf_addr_t __user *) p;
42636 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
42637 return -EFAULT;
42638
42639@@ -307,9 +324,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42640 return -EFAULT;
42641 current->mm->env_end = p;
42642
42643+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
42644+
42645 /* Put the elf_info on the stack in the right place. */
42646 sp = (elf_addr_t __user *)envp + 1;
42647- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
42648+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
42649 return -EFAULT;
42650 return 0;
42651 }
42652@@ -378,10 +397,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42653 {
42654 struct elf_phdr *elf_phdata;
42655 struct elf_phdr *eppnt;
42656- unsigned long load_addr = 0;
42657+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
42658 int load_addr_set = 0;
42659 unsigned long last_bss = 0, elf_bss = 0;
42660- unsigned long error = ~0UL;
42661+ unsigned long error = -EINVAL;
42662 unsigned long total_size;
42663 int retval, i, size;
42664
42665@@ -427,6 +446,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42666 goto out_close;
42667 }
42668
42669+#ifdef CONFIG_PAX_SEGMEXEC
42670+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
42671+ pax_task_size = SEGMEXEC_TASK_SIZE;
42672+#endif
42673+
42674 eppnt = elf_phdata;
42675 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
42676 if (eppnt->p_type == PT_LOAD) {
42677@@ -470,8 +494,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42678 k = load_addr + eppnt->p_vaddr;
42679 if (BAD_ADDR(k) ||
42680 eppnt->p_filesz > eppnt->p_memsz ||
42681- eppnt->p_memsz > TASK_SIZE ||
42682- TASK_SIZE - eppnt->p_memsz < k) {
42683+ eppnt->p_memsz > pax_task_size ||
42684+ pax_task_size - eppnt->p_memsz < k) {
42685 error = -ENOMEM;
42686 goto out_close;
42687 }
42688@@ -523,6 +547,311 @@ out:
42689 return error;
42690 }
42691
42692+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42693+#ifdef CONFIG_PAX_SOFTMODE
42694+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
42695+{
42696+ unsigned long pax_flags = 0UL;
42697+
42698+#ifdef CONFIG_PAX_PAGEEXEC
42699+ if (elf_phdata->p_flags & PF_PAGEEXEC)
42700+ pax_flags |= MF_PAX_PAGEEXEC;
42701+#endif
42702+
42703+#ifdef CONFIG_PAX_SEGMEXEC
42704+ if (elf_phdata->p_flags & PF_SEGMEXEC)
42705+ pax_flags |= MF_PAX_SEGMEXEC;
42706+#endif
42707+
42708+#ifdef CONFIG_PAX_EMUTRAMP
42709+ if (elf_phdata->p_flags & PF_EMUTRAMP)
42710+ pax_flags |= MF_PAX_EMUTRAMP;
42711+#endif
42712+
42713+#ifdef CONFIG_PAX_MPROTECT
42714+ if (elf_phdata->p_flags & PF_MPROTECT)
42715+ pax_flags |= MF_PAX_MPROTECT;
42716+#endif
42717+
42718+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42719+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42720+ pax_flags |= MF_PAX_RANDMMAP;
42721+#endif
42722+
42723+ return pax_flags;
42724+}
42725+#endif
42726+
42727+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
42728+{
42729+ unsigned long pax_flags = 0UL;
42730+
42731+#ifdef CONFIG_PAX_PAGEEXEC
42732+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42733+ pax_flags |= MF_PAX_PAGEEXEC;
42734+#endif
42735+
42736+#ifdef CONFIG_PAX_SEGMEXEC
42737+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42738+ pax_flags |= MF_PAX_SEGMEXEC;
42739+#endif
42740+
42741+#ifdef CONFIG_PAX_EMUTRAMP
42742+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42743+ pax_flags |= MF_PAX_EMUTRAMP;
42744+#endif
42745+
42746+#ifdef CONFIG_PAX_MPROTECT
42747+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42748+ pax_flags |= MF_PAX_MPROTECT;
42749+#endif
42750+
42751+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42752+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42753+ pax_flags |= MF_PAX_RANDMMAP;
42754+#endif
42755+
42756+ return pax_flags;
42757+}
42758+#endif
42759+
42760+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42761+#ifdef CONFIG_PAX_SOFTMODE
42762+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
42763+{
42764+ unsigned long pax_flags = 0UL;
42765+
42766+#ifdef CONFIG_PAX_PAGEEXEC
42767+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
42768+ pax_flags |= MF_PAX_PAGEEXEC;
42769+#endif
42770+
42771+#ifdef CONFIG_PAX_SEGMEXEC
42772+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
42773+ pax_flags |= MF_PAX_SEGMEXEC;
42774+#endif
42775+
42776+#ifdef CONFIG_PAX_EMUTRAMP
42777+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
42778+ pax_flags |= MF_PAX_EMUTRAMP;
42779+#endif
42780+
42781+#ifdef CONFIG_PAX_MPROTECT
42782+ if (pax_flags_softmode & MF_PAX_MPROTECT)
42783+ pax_flags |= MF_PAX_MPROTECT;
42784+#endif
42785+
42786+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42787+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
42788+ pax_flags |= MF_PAX_RANDMMAP;
42789+#endif
42790+
42791+ return pax_flags;
42792+}
42793+#endif
42794+
42795+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
42796+{
42797+ unsigned long pax_flags = 0UL;
42798+
42799+#ifdef CONFIG_PAX_PAGEEXEC
42800+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
42801+ pax_flags |= MF_PAX_PAGEEXEC;
42802+#endif
42803+
42804+#ifdef CONFIG_PAX_SEGMEXEC
42805+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
42806+ pax_flags |= MF_PAX_SEGMEXEC;
42807+#endif
42808+
42809+#ifdef CONFIG_PAX_EMUTRAMP
42810+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
42811+ pax_flags |= MF_PAX_EMUTRAMP;
42812+#endif
42813+
42814+#ifdef CONFIG_PAX_MPROTECT
42815+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
42816+ pax_flags |= MF_PAX_MPROTECT;
42817+#endif
42818+
42819+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42820+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
42821+ pax_flags |= MF_PAX_RANDMMAP;
42822+#endif
42823+
42824+ return pax_flags;
42825+}
42826+#endif
42827+
42828+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42829+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
42830+{
42831+ unsigned long pax_flags = 0UL;
42832+
42833+#ifdef CONFIG_PAX_EI_PAX
42834+
42835+#ifdef CONFIG_PAX_PAGEEXEC
42836+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
42837+ pax_flags |= MF_PAX_PAGEEXEC;
42838+#endif
42839+
42840+#ifdef CONFIG_PAX_SEGMEXEC
42841+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
42842+ pax_flags |= MF_PAX_SEGMEXEC;
42843+#endif
42844+
42845+#ifdef CONFIG_PAX_EMUTRAMP
42846+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
42847+ pax_flags |= MF_PAX_EMUTRAMP;
42848+#endif
42849+
42850+#ifdef CONFIG_PAX_MPROTECT
42851+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
42852+ pax_flags |= MF_PAX_MPROTECT;
42853+#endif
42854+
42855+#ifdef CONFIG_PAX_ASLR
42856+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
42857+ pax_flags |= MF_PAX_RANDMMAP;
42858+#endif
42859+
42860+#else
42861+
42862+#ifdef CONFIG_PAX_PAGEEXEC
42863+ pax_flags |= MF_PAX_PAGEEXEC;
42864+#endif
42865+
42866+#ifdef CONFIG_PAX_SEGMEXEC
42867+ pax_flags |= MF_PAX_SEGMEXEC;
42868+#endif
42869+
42870+#ifdef CONFIG_PAX_MPROTECT
42871+ pax_flags |= MF_PAX_MPROTECT;
42872+#endif
42873+
42874+#ifdef CONFIG_PAX_RANDMMAP
42875+ if (randomize_va_space)
42876+ pax_flags |= MF_PAX_RANDMMAP;
42877+#endif
42878+
42879+#endif
42880+
42881+ return pax_flags;
42882+}
42883+
42884+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
42885+{
42886+
42887+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42888+ unsigned long i;
42889+
42890+ for (i = 0UL; i < elf_ex->e_phnum; i++)
42891+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
42892+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
42893+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
42894+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
42895+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
42896+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
42897+ return ~0UL;
42898+
42899+#ifdef CONFIG_PAX_SOFTMODE
42900+ if (pax_softmode)
42901+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
42902+ else
42903+#endif
42904+
42905+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
42906+ break;
42907+ }
42908+#endif
42909+
42910+ return ~0UL;
42911+}
42912+
42913+static unsigned long pax_parse_xattr_pax(struct file * const file)
42914+{
42915+
42916+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42917+ ssize_t xattr_size, i;
42918+ unsigned char xattr_value[5];
42919+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
42920+
42921+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
42922+ if (xattr_size <= 0)
42923+ return ~0UL;
42924+
42925+ for (i = 0; i < xattr_size; i++)
42926+ switch (xattr_value[i]) {
42927+ default:
42928+ return ~0UL;
42929+
42930+#define parse_flag(option1, option2, flag) \
42931+ case option1: \
42932+ pax_flags_hardmode |= MF_PAX_##flag; \
42933+ break; \
42934+ case option2: \
42935+ pax_flags_softmode |= MF_PAX_##flag; \
42936+ break;
42937+
42938+ parse_flag('p', 'P', PAGEEXEC);
42939+ parse_flag('e', 'E', EMUTRAMP);
42940+ parse_flag('m', 'M', MPROTECT);
42941+ parse_flag('r', 'R', RANDMMAP);
42942+ parse_flag('s', 'S', SEGMEXEC);
42943+
42944+#undef parse_flag
42945+ }
42946+
42947+ if (pax_flags_hardmode & pax_flags_softmode)
42948+ return ~0UL;
42949+
42950+#ifdef CONFIG_PAX_SOFTMODE
42951+ if (pax_softmode)
42952+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
42953+ else
42954+#endif
42955+
42956+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
42957+#else
42958+ return ~0UL;
42959+#endif
42960+
42961+}
42962+
42963+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
42964+{
42965+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
42966+
42967+ pax_flags = pax_parse_ei_pax(elf_ex);
42968+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
42969+ xattr_pax_flags = pax_parse_xattr_pax(file);
42970+
42971+ if (pt_pax_flags == ~0UL)
42972+ pt_pax_flags = xattr_pax_flags;
42973+ else if (xattr_pax_flags == ~0UL)
42974+ xattr_pax_flags = pt_pax_flags;
42975+ if (pt_pax_flags != xattr_pax_flags)
42976+ return -EINVAL;
42977+ if (pt_pax_flags != ~0UL)
42978+ pax_flags = pt_pax_flags;
42979+
42980+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42981+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42982+ if ((__supported_pte_mask & _PAGE_NX))
42983+ pax_flags &= ~MF_PAX_SEGMEXEC;
42984+ else
42985+ pax_flags &= ~MF_PAX_PAGEEXEC;
42986+ }
42987+#endif
42988+
42989+ if (0 > pax_check_flags(&pax_flags))
42990+ return -EINVAL;
42991+
42992+ current->mm->pax_flags = pax_flags;
42993+ return 0;
42994+}
42995+#endif
42996+
42997 /*
42998 * These are the functions used to load ELF style executables and shared
42999 * libraries. There is no binary dependent code anywhere else.
43000@@ -539,6 +868,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
43001 {
43002 unsigned int random_variable = 0;
43003
43004+#ifdef CONFIG_PAX_RANDUSTACK
43005+ if (randomize_va_space)
43006+ return stack_top - current->mm->delta_stack;
43007+#endif
43008+
43009 if ((current->flags & PF_RANDOMIZE) &&
43010 !(current->personality & ADDR_NO_RANDOMIZE)) {
43011 random_variable = get_random_int() & STACK_RND_MASK;
43012@@ -557,7 +891,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43013 unsigned long load_addr = 0, load_bias = 0;
43014 int load_addr_set = 0;
43015 char * elf_interpreter = NULL;
43016- unsigned long error;
43017+ unsigned long error = 0;
43018 struct elf_phdr *elf_ppnt, *elf_phdata;
43019 unsigned long elf_bss, elf_brk;
43020 int retval, i;
43021@@ -567,11 +901,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43022 unsigned long start_code, end_code, start_data, end_data;
43023 unsigned long reloc_func_desc __maybe_unused = 0;
43024 int executable_stack = EXSTACK_DEFAULT;
43025- unsigned long def_flags = 0;
43026 struct {
43027 struct elfhdr elf_ex;
43028 struct elfhdr interp_elf_ex;
43029 } *loc;
43030+ unsigned long pax_task_size = TASK_SIZE;
43031
43032 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
43033 if (!loc) {
43034@@ -707,11 +1041,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43035 goto out_free_dentry;
43036
43037 /* OK, This is the point of no return */
43038- current->mm->def_flags = def_flags;
43039+
43040+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
43041+ current->mm->pax_flags = 0UL;
43042+#endif
43043+
43044+#ifdef CONFIG_PAX_DLRESOLVE
43045+ current->mm->call_dl_resolve = 0UL;
43046+#endif
43047+
43048+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
43049+ current->mm->call_syscall = 0UL;
43050+#endif
43051+
43052+#ifdef CONFIG_PAX_ASLR
43053+ current->mm->delta_mmap = 0UL;
43054+ current->mm->delta_stack = 0UL;
43055+#endif
43056+
43057+ current->mm->def_flags = 0;
43058+
43059+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
43060+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
43061+ send_sig(SIGKILL, current, 0);
43062+ goto out_free_dentry;
43063+ }
43064+#endif
43065+
43066+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
43067+ pax_set_initial_flags(bprm);
43068+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
43069+ if (pax_set_initial_flags_func)
43070+ (pax_set_initial_flags_func)(bprm);
43071+#endif
43072+
43073+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43074+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
43075+ current->mm->context.user_cs_limit = PAGE_SIZE;
43076+ current->mm->def_flags |= VM_PAGEEXEC;
43077+ }
43078+#endif
43079+
43080+#ifdef CONFIG_PAX_SEGMEXEC
43081+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
43082+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
43083+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
43084+ pax_task_size = SEGMEXEC_TASK_SIZE;
43085+ current->mm->def_flags |= VM_NOHUGEPAGE;
43086+ }
43087+#endif
43088+
43089+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
43090+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43091+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
43092+ put_cpu();
43093+ }
43094+#endif
43095
43096 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
43097 may depend on the personality. */
43098 SET_PERSONALITY(loc->elf_ex);
43099+
43100+#ifdef CONFIG_PAX_ASLR
43101+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
43102+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
43103+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
43104+ }
43105+#endif
43106+
43107+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43108+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43109+ executable_stack = EXSTACK_DISABLE_X;
43110+ current->personality &= ~READ_IMPLIES_EXEC;
43111+ } else
43112+#endif
43113+
43114 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
43115 current->personality |= READ_IMPLIES_EXEC;
43116
43117@@ -802,6 +1206,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43118 #else
43119 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
43120 #endif
43121+
43122+#ifdef CONFIG_PAX_RANDMMAP
43123+ /* PaX: randomize base address at the default exe base if requested */
43124+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
43125+#ifdef CONFIG_SPARC64
43126+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
43127+#else
43128+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
43129+#endif
43130+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
43131+ elf_flags |= MAP_FIXED;
43132+ }
43133+#endif
43134+
43135 }
43136
43137 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
43138@@ -834,9 +1252,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43139 * allowed task size. Note that p_filesz must always be
43140 * <= p_memsz so it is only necessary to check p_memsz.
43141 */
43142- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
43143- elf_ppnt->p_memsz > TASK_SIZE ||
43144- TASK_SIZE - elf_ppnt->p_memsz < k) {
43145+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
43146+ elf_ppnt->p_memsz > pax_task_size ||
43147+ pax_task_size - elf_ppnt->p_memsz < k) {
43148 /* set_brk can never work. Avoid overflows. */
43149 send_sig(SIGKILL, current, 0);
43150 retval = -EINVAL;
43151@@ -875,11 +1293,41 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43152 goto out_free_dentry;
43153 }
43154 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
43155- send_sig(SIGSEGV, current, 0);
43156- retval = -EFAULT; /* Nobody gets to see this, but.. */
43157- goto out_free_dentry;
43158+ /*
43159+ * This bss-zeroing can fail if the ELF
43160+ * file specifies odd protections. So
43161+ * we don't check the return value
43162+ */
43163 }
43164
43165+#ifdef CONFIG_PAX_RANDMMAP
43166+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
43167+ unsigned long start, size;
43168+
43169+ start = ELF_PAGEALIGN(elf_brk);
43170+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
43171+ down_read(&current->mm->mmap_sem);
43172+ retval = -ENOMEM;
43173+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
43174+ unsigned long prot = PROT_NONE;
43175+
43176+ up_read(&current->mm->mmap_sem);
43177+ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
43178+// if (current->personality & ADDR_NO_RANDOMIZE)
43179+// prot = PROT_READ;
43180+ start = vm_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
43181+ retval = IS_ERR_VALUE(start) ? start : 0;
43182+ } else
43183+ up_read(&current->mm->mmap_sem);
43184+ if (retval == 0)
43185+ retval = set_brk(start + size, start + size + PAGE_SIZE);
43186+ if (retval < 0) {
43187+ send_sig(SIGKILL, current, 0);
43188+ goto out_free_dentry;
43189+ }
43190+ }
43191+#endif
43192+
43193 if (elf_interpreter) {
43194 unsigned long uninitialized_var(interp_map_addr);
43195
43196@@ -1107,7 +1555,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
43197 * Decide what to dump of a segment, part, all or none.
43198 */
43199 static unsigned long vma_dump_size(struct vm_area_struct *vma,
43200- unsigned long mm_flags)
43201+ unsigned long mm_flags, long signr)
43202 {
43203 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
43204
43205@@ -1144,7 +1592,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
43206 if (vma->vm_file == NULL)
43207 return 0;
43208
43209- if (FILTER(MAPPED_PRIVATE))
43210+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
43211 goto whole;
43212
43213 /*
43214@@ -1366,9 +1814,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
43215 {
43216 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
43217 int i = 0;
43218- do
43219+ do {
43220 i += 2;
43221- while (auxv[i - 2] != AT_NULL);
43222+ } while (auxv[i - 2] != AT_NULL);
43223 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
43224 }
43225
43226@@ -1890,14 +2338,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
43227 }
43228
43229 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
43230- unsigned long mm_flags)
43231+ struct coredump_params *cprm)
43232 {
43233 struct vm_area_struct *vma;
43234 size_t size = 0;
43235
43236 for (vma = first_vma(current, gate_vma); vma != NULL;
43237 vma = next_vma(vma, gate_vma))
43238- size += vma_dump_size(vma, mm_flags);
43239+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43240 return size;
43241 }
43242
43243@@ -1991,7 +2439,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43244
43245 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
43246
43247- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
43248+ offset += elf_core_vma_data_size(gate_vma, cprm);
43249 offset += elf_core_extra_data_size();
43250 e_shoff = offset;
43251
43252@@ -2005,10 +2453,12 @@ static int elf_core_dump(struct coredump_params *cprm)
43253 offset = dataoff;
43254
43255 size += sizeof(*elf);
43256+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
43257 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
43258 goto end_coredump;
43259
43260 size += sizeof(*phdr4note);
43261+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
43262 if (size > cprm->limit
43263 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
43264 goto end_coredump;
43265@@ -2022,7 +2472,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43266 phdr.p_offset = offset;
43267 phdr.p_vaddr = vma->vm_start;
43268 phdr.p_paddr = 0;
43269- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
43270+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43271 phdr.p_memsz = vma->vm_end - vma->vm_start;
43272 offset += phdr.p_filesz;
43273 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
43274@@ -2033,6 +2483,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43275 phdr.p_align = ELF_EXEC_PAGESIZE;
43276
43277 size += sizeof(phdr);
43278+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
43279 if (size > cprm->limit
43280 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
43281 goto end_coredump;
43282@@ -2057,7 +2508,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43283 unsigned long addr;
43284 unsigned long end;
43285
43286- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
43287+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43288
43289 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
43290 struct page *page;
43291@@ -2066,6 +2517,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43292 page = get_dump_page(addr);
43293 if (page) {
43294 void *kaddr = kmap(page);
43295+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
43296 stop = ((size += PAGE_SIZE) > cprm->limit) ||
43297 !dump_write(cprm->file, kaddr,
43298 PAGE_SIZE);
43299@@ -2083,6 +2535,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43300
43301 if (e_phnum == PN_XNUM) {
43302 size += sizeof(*shdr4extnum);
43303+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
43304 if (size > cprm->limit
43305 || !dump_write(cprm->file, shdr4extnum,
43306 sizeof(*shdr4extnum)))
43307@@ -2103,6 +2556,97 @@ out:
43308
43309 #endif /* CONFIG_ELF_CORE */
43310
43311+#ifdef CONFIG_PAX_MPROTECT
43312+/* PaX: non-PIC ELF libraries need relocations on their executable segments
43313+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
43314+ * we'll remove VM_MAYWRITE for good on RELRO segments.
43315+ *
43316+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
43317+ * basis because we want to allow the common case and not the special ones.
43318+ */
43319+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
43320+{
43321+ struct elfhdr elf_h;
43322+ struct elf_phdr elf_p;
43323+ unsigned long i;
43324+ unsigned long oldflags;
43325+ bool is_textrel_rw, is_textrel_rx, is_relro;
43326+
43327+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
43328+ return;
43329+
43330+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
43331+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
43332+
43333+#ifdef CONFIG_PAX_ELFRELOCS
43334+ /* possible TEXTREL */
43335+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
43336+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
43337+#else
43338+ is_textrel_rw = false;
43339+ is_textrel_rx = false;
43340+#endif
43341+
43342+ /* possible RELRO */
43343+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
43344+
43345+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
43346+ return;
43347+
43348+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
43349+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
43350+
43351+#ifdef CONFIG_PAX_ETEXECRELOCS
43352+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43353+#else
43354+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
43355+#endif
43356+
43357+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43358+ !elf_check_arch(&elf_h) ||
43359+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
43360+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
43361+ return;
43362+
43363+ for (i = 0UL; i < elf_h.e_phnum; i++) {
43364+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
43365+ return;
43366+ switch (elf_p.p_type) {
43367+ case PT_DYNAMIC:
43368+ if (!is_textrel_rw && !is_textrel_rx)
43369+ continue;
43370+ i = 0UL;
43371+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
43372+ elf_dyn dyn;
43373+
43374+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
43375+ return;
43376+ if (dyn.d_tag == DT_NULL)
43377+ return;
43378+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
43379+ gr_log_textrel(vma);
43380+ if (is_textrel_rw)
43381+ vma->vm_flags |= VM_MAYWRITE;
43382+ else
43383+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
43384+ vma->vm_flags &= ~VM_MAYWRITE;
43385+ return;
43386+ }
43387+ i++;
43388+ }
43389+ return;
43390+
43391+ case PT_GNU_RELRO:
43392+ if (!is_relro)
43393+ continue;
43394+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
43395+ vma->vm_flags &= ~VM_MAYWRITE;
43396+ return;
43397+ }
43398+ }
43399+}
43400+#endif
43401+
43402 static int __init init_elf_binfmt(void)
43403 {
43404 register_binfmt(&elf_format);
43405diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
43406index 178cb70..8972997 100644
43407--- a/fs/binfmt_flat.c
43408+++ b/fs/binfmt_flat.c
43409@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
43410 realdatastart = (unsigned long) -ENOMEM;
43411 printk("Unable to allocate RAM for process data, errno %d\n",
43412 (int)-realdatastart);
43413+ down_write(&current->mm->mmap_sem);
43414 vm_munmap(textpos, text_len);
43415+ up_write(&current->mm->mmap_sem);
43416 ret = realdatastart;
43417 goto err;
43418 }
43419@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
43420 }
43421 if (IS_ERR_VALUE(result)) {
43422 printk("Unable to read data+bss, errno %d\n", (int)-result);
43423+ down_write(&current->mm->mmap_sem);
43424 vm_munmap(textpos, text_len);
43425 vm_munmap(realdatastart, len);
43426+ up_write(&current->mm->mmap_sem);
43427 ret = result;
43428 goto err;
43429 }
43430@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
43431 }
43432 if (IS_ERR_VALUE(result)) {
43433 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
43434+ down_write(&current->mm->mmap_sem);
43435 vm_munmap(textpos, text_len + data_len + extra +
43436 MAX_SHARED_LIBS * sizeof(unsigned long));
43437+ up_write(&current->mm->mmap_sem);
43438 ret = result;
43439 goto err;
43440 }
43441diff --git a/fs/bio.c b/fs/bio.c
43442index 73922ab..16642dd 100644
43443--- a/fs/bio.c
43444+++ b/fs/bio.c
43445@@ -841,7 +841,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
43446 /*
43447 * Overflow, abort
43448 */
43449- if (end < start)
43450+ if (end < start || end - start > INT_MAX - nr_pages)
43451 return ERR_PTR(-EINVAL);
43452
43453 nr_pages += end - start;
43454@@ -975,7 +975,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
43455 /*
43456 * Overflow, abort
43457 */
43458- if (end < start)
43459+ if (end < start || end - start > INT_MAX - nr_pages)
43460 return ERR_PTR(-EINVAL);
43461
43462 nr_pages += end - start;
43463@@ -1237,7 +1237,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
43464 const int read = bio_data_dir(bio) == READ;
43465 struct bio_map_data *bmd = bio->bi_private;
43466 int i;
43467- char *p = bmd->sgvecs[0].iov_base;
43468+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
43469
43470 __bio_for_each_segment(bvec, bio, i, 0) {
43471 char *addr = page_address(bvec->bv_page);
43472diff --git a/fs/block_dev.c b/fs/block_dev.c
43473index c2bbe1f..9dfbc23 100644
43474--- a/fs/block_dev.c
43475+++ b/fs/block_dev.c
43476@@ -704,7 +704,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
43477 else if (bdev->bd_contains == bdev)
43478 return true; /* is a whole device which isn't held */
43479
43480- else if (whole->bd_holder == bd_may_claim)
43481+ else if (whole->bd_holder == (void *)bd_may_claim)
43482 return true; /* is a partition of a device that is being partitioned */
43483 else if (whole->bd_holder != NULL)
43484 return false; /* is a partition of a held device */
43485diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
43486index da6e936..1598dd0 100644
43487--- a/fs/btrfs/check-integrity.c
43488+++ b/fs/btrfs/check-integrity.c
43489@@ -155,7 +155,7 @@ struct btrfsic_block {
43490 union {
43491 bio_end_io_t *bio;
43492 bh_end_io_t *bh;
43493- } orig_bio_bh_end_io;
43494+ } __no_const orig_bio_bh_end_io;
43495 int submit_bio_bh_rw;
43496 u64 flush_gen; /* only valid if !never_written */
43497 };
43498diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
43499index 8206b39..06d5654 100644
43500--- a/fs/btrfs/ctree.c
43501+++ b/fs/btrfs/ctree.c
43502@@ -973,9 +973,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
43503 free_extent_buffer(buf);
43504 add_root_to_dirty_list(root);
43505 } else {
43506- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
43507- parent_start = parent->start;
43508- else
43509+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
43510+ if (parent)
43511+ parent_start = parent->start;
43512+ else
43513+ parent_start = 0;
43514+ } else
43515 parent_start = 0;
43516
43517 WARN_ON(trans->transid != btrfs_header_generation(parent));
43518diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
43519index a7d1921..a32dba2 100644
43520--- a/fs/btrfs/inode.c
43521+++ b/fs/btrfs/inode.c
43522@@ -7111,7 +7111,7 @@ fail:
43523 return -ENOMEM;
43524 }
43525
43526-static int btrfs_getattr(struct vfsmount *mnt,
43527+int btrfs_getattr(struct vfsmount *mnt,
43528 struct dentry *dentry, struct kstat *stat)
43529 {
43530 struct inode *inode = dentry->d_inode;
43531@@ -7125,6 +7125,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
43532 return 0;
43533 }
43534
43535+EXPORT_SYMBOL(btrfs_getattr);
43536+
43537+dev_t get_btrfs_dev_from_inode(struct inode *inode)
43538+{
43539+ return BTRFS_I(inode)->root->anon_dev;
43540+}
43541+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
43542+
43543 /*
43544 * If a file is moved, it will inherit the cow and compression flags of the new
43545 * directory.
43546diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
43547index 0e92e57..8b560de 100644
43548--- a/fs/btrfs/ioctl.c
43549+++ b/fs/btrfs/ioctl.c
43550@@ -2902,9 +2902,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43551 for (i = 0; i < num_types; i++) {
43552 struct btrfs_space_info *tmp;
43553
43554+ /* Don't copy in more than we allocated */
43555 if (!slot_count)
43556 break;
43557
43558+ slot_count--;
43559+
43560 info = NULL;
43561 rcu_read_lock();
43562 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
43563@@ -2926,10 +2929,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43564 memcpy(dest, &space, sizeof(space));
43565 dest++;
43566 space_args.total_spaces++;
43567- slot_count--;
43568 }
43569- if (!slot_count)
43570- break;
43571 }
43572 up_read(&info->groups_sem);
43573 }
43574diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
43575index 646ee21..f020f87 100644
43576--- a/fs/btrfs/relocation.c
43577+++ b/fs/btrfs/relocation.c
43578@@ -1268,7 +1268,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
43579 }
43580 spin_unlock(&rc->reloc_root_tree.lock);
43581
43582- BUG_ON((struct btrfs_root *)node->data != root);
43583+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
43584
43585 if (!del) {
43586 spin_lock(&rc->reloc_root_tree.lock);
43587diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
43588index 622f469..e8d2d55 100644
43589--- a/fs/cachefiles/bind.c
43590+++ b/fs/cachefiles/bind.c
43591@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
43592 args);
43593
43594 /* start by checking things over */
43595- ASSERT(cache->fstop_percent >= 0 &&
43596- cache->fstop_percent < cache->fcull_percent &&
43597+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
43598 cache->fcull_percent < cache->frun_percent &&
43599 cache->frun_percent < 100);
43600
43601- ASSERT(cache->bstop_percent >= 0 &&
43602- cache->bstop_percent < cache->bcull_percent &&
43603+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
43604 cache->bcull_percent < cache->brun_percent &&
43605 cache->brun_percent < 100);
43606
43607diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
43608index 0a1467b..6a53245 100644
43609--- a/fs/cachefiles/daemon.c
43610+++ b/fs/cachefiles/daemon.c
43611@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
43612 if (n > buflen)
43613 return -EMSGSIZE;
43614
43615- if (copy_to_user(_buffer, buffer, n) != 0)
43616+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
43617 return -EFAULT;
43618
43619 return n;
43620@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
43621 if (test_bit(CACHEFILES_DEAD, &cache->flags))
43622 return -EIO;
43623
43624- if (datalen < 0 || datalen > PAGE_SIZE - 1)
43625+ if (datalen > PAGE_SIZE - 1)
43626 return -EOPNOTSUPP;
43627
43628 /* drag the command string into the kernel so we can parse it */
43629@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
43630 if (args[0] != '%' || args[1] != '\0')
43631 return -EINVAL;
43632
43633- if (fstop < 0 || fstop >= cache->fcull_percent)
43634+ if (fstop >= cache->fcull_percent)
43635 return cachefiles_daemon_range_error(cache, args);
43636
43637 cache->fstop_percent = fstop;
43638@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
43639 if (args[0] != '%' || args[1] != '\0')
43640 return -EINVAL;
43641
43642- if (bstop < 0 || bstop >= cache->bcull_percent)
43643+ if (bstop >= cache->bcull_percent)
43644 return cachefiles_daemon_range_error(cache, args);
43645
43646 cache->bstop_percent = bstop;
43647diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
43648index bd6bc1b..b627b53 100644
43649--- a/fs/cachefiles/internal.h
43650+++ b/fs/cachefiles/internal.h
43651@@ -57,7 +57,7 @@ struct cachefiles_cache {
43652 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
43653 struct rb_root active_nodes; /* active nodes (can't be culled) */
43654 rwlock_t active_lock; /* lock for active_nodes */
43655- atomic_t gravecounter; /* graveyard uniquifier */
43656+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
43657 unsigned frun_percent; /* when to stop culling (% files) */
43658 unsigned fcull_percent; /* when to start culling (% files) */
43659 unsigned fstop_percent; /* when to stop allocating (% files) */
43660@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
43661 * proc.c
43662 */
43663 #ifdef CONFIG_CACHEFILES_HISTOGRAM
43664-extern atomic_t cachefiles_lookup_histogram[HZ];
43665-extern atomic_t cachefiles_mkdir_histogram[HZ];
43666-extern atomic_t cachefiles_create_histogram[HZ];
43667+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43668+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43669+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
43670
43671 extern int __init cachefiles_proc_init(void);
43672 extern void cachefiles_proc_cleanup(void);
43673 static inline
43674-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
43675+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
43676 {
43677 unsigned long jif = jiffies - start_jif;
43678 if (jif >= HZ)
43679 jif = HZ - 1;
43680- atomic_inc(&histogram[jif]);
43681+ atomic_inc_unchecked(&histogram[jif]);
43682 }
43683
43684 #else
43685diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
43686index 7f0771d..87d4f36 100644
43687--- a/fs/cachefiles/namei.c
43688+++ b/fs/cachefiles/namei.c
43689@@ -318,7 +318,7 @@ try_again:
43690 /* first step is to make up a grave dentry in the graveyard */
43691 sprintf(nbuffer, "%08x%08x",
43692 (uint32_t) get_seconds(),
43693- (uint32_t) atomic_inc_return(&cache->gravecounter));
43694+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
43695
43696 /* do the multiway lock magic */
43697 trap = lock_rename(cache->graveyard, dir);
43698diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
43699index eccd339..4c1d995 100644
43700--- a/fs/cachefiles/proc.c
43701+++ b/fs/cachefiles/proc.c
43702@@ -14,9 +14,9 @@
43703 #include <linux/seq_file.h>
43704 #include "internal.h"
43705
43706-atomic_t cachefiles_lookup_histogram[HZ];
43707-atomic_t cachefiles_mkdir_histogram[HZ];
43708-atomic_t cachefiles_create_histogram[HZ];
43709+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43710+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43711+atomic_unchecked_t cachefiles_create_histogram[HZ];
43712
43713 /*
43714 * display the latency histogram
43715@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
43716 return 0;
43717 default:
43718 index = (unsigned long) v - 3;
43719- x = atomic_read(&cachefiles_lookup_histogram[index]);
43720- y = atomic_read(&cachefiles_mkdir_histogram[index]);
43721- z = atomic_read(&cachefiles_create_histogram[index]);
43722+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
43723+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
43724+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
43725 if (x == 0 && y == 0 && z == 0)
43726 return 0;
43727
43728diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
43729index 0e3c092..818480e 100644
43730--- a/fs/cachefiles/rdwr.c
43731+++ b/fs/cachefiles/rdwr.c
43732@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
43733 old_fs = get_fs();
43734 set_fs(KERNEL_DS);
43735 ret = file->f_op->write(
43736- file, (const void __user *) data, len, &pos);
43737+ file, (const void __force_user *) data, len, &pos);
43738 set_fs(old_fs);
43739 kunmap(page);
43740 if (ret != len)
43741diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
43742index 3e8094b..cb3ff3d 100644
43743--- a/fs/ceph/dir.c
43744+++ b/fs/ceph/dir.c
43745@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
43746 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
43747 struct ceph_mds_client *mdsc = fsc->mdsc;
43748 unsigned frag = fpos_frag(filp->f_pos);
43749- int off = fpos_off(filp->f_pos);
43750+ unsigned int off = fpos_off(filp->f_pos);
43751 int err;
43752 u32 ftype;
43753 struct ceph_mds_reply_info_parsed *rinfo;
43754@@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
43755 if (nd &&
43756 (nd->flags & LOOKUP_OPEN) &&
43757 !(nd->intent.open.flags & O_CREAT)) {
43758- int mode = nd->intent.open.create_mode & ~current->fs->umask;
43759+ int mode = nd->intent.open.create_mode & ~current_umask();
43760 return ceph_lookup_open(dir, dentry, nd, mode, 1);
43761 }
43762
43763diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
43764index e814052..28dcdf7 100644
43765--- a/fs/cifs/cifs_debug.c
43766+++ b/fs/cifs/cifs_debug.c
43767@@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43768
43769 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
43770 #ifdef CONFIG_CIFS_STATS2
43771- atomic_set(&totBufAllocCount, 0);
43772- atomic_set(&totSmBufAllocCount, 0);
43773+ atomic_set_unchecked(&totBufAllocCount, 0);
43774+ atomic_set_unchecked(&totSmBufAllocCount, 0);
43775 #endif /* CONFIG_CIFS_STATS2 */
43776 spin_lock(&cifs_tcp_ses_lock);
43777 list_for_each(tmp1, &cifs_tcp_ses_list) {
43778@@ -281,25 +281,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43779 tcon = list_entry(tmp3,
43780 struct cifs_tcon,
43781 tcon_list);
43782- atomic_set(&tcon->num_smbs_sent, 0);
43783- atomic_set(&tcon->num_writes, 0);
43784- atomic_set(&tcon->num_reads, 0);
43785- atomic_set(&tcon->num_oplock_brks, 0);
43786- atomic_set(&tcon->num_opens, 0);
43787- atomic_set(&tcon->num_posixopens, 0);
43788- atomic_set(&tcon->num_posixmkdirs, 0);
43789- atomic_set(&tcon->num_closes, 0);
43790- atomic_set(&tcon->num_deletes, 0);
43791- atomic_set(&tcon->num_mkdirs, 0);
43792- atomic_set(&tcon->num_rmdirs, 0);
43793- atomic_set(&tcon->num_renames, 0);
43794- atomic_set(&tcon->num_t2renames, 0);
43795- atomic_set(&tcon->num_ffirst, 0);
43796- atomic_set(&tcon->num_fnext, 0);
43797- atomic_set(&tcon->num_fclose, 0);
43798- atomic_set(&tcon->num_hardlinks, 0);
43799- atomic_set(&tcon->num_symlinks, 0);
43800- atomic_set(&tcon->num_locks, 0);
43801+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
43802+ atomic_set_unchecked(&tcon->num_writes, 0);
43803+ atomic_set_unchecked(&tcon->num_reads, 0);
43804+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
43805+ atomic_set_unchecked(&tcon->num_opens, 0);
43806+ atomic_set_unchecked(&tcon->num_posixopens, 0);
43807+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
43808+ atomic_set_unchecked(&tcon->num_closes, 0);
43809+ atomic_set_unchecked(&tcon->num_deletes, 0);
43810+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
43811+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
43812+ atomic_set_unchecked(&tcon->num_renames, 0);
43813+ atomic_set_unchecked(&tcon->num_t2renames, 0);
43814+ atomic_set_unchecked(&tcon->num_ffirst, 0);
43815+ atomic_set_unchecked(&tcon->num_fnext, 0);
43816+ atomic_set_unchecked(&tcon->num_fclose, 0);
43817+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
43818+ atomic_set_unchecked(&tcon->num_symlinks, 0);
43819+ atomic_set_unchecked(&tcon->num_locks, 0);
43820 }
43821 }
43822 }
43823@@ -329,8 +329,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43824 smBufAllocCount.counter, cifs_min_small);
43825 #ifdef CONFIG_CIFS_STATS2
43826 seq_printf(m, "Total Large %d Small %d Allocations\n",
43827- atomic_read(&totBufAllocCount),
43828- atomic_read(&totSmBufAllocCount));
43829+ atomic_read_unchecked(&totBufAllocCount),
43830+ atomic_read_unchecked(&totSmBufAllocCount));
43831 #endif /* CONFIG_CIFS_STATS2 */
43832
43833 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
43834@@ -359,41 +359,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43835 if (tcon->need_reconnect)
43836 seq_puts(m, "\tDISCONNECTED ");
43837 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
43838- atomic_read(&tcon->num_smbs_sent),
43839- atomic_read(&tcon->num_oplock_brks));
43840+ atomic_read_unchecked(&tcon->num_smbs_sent),
43841+ atomic_read_unchecked(&tcon->num_oplock_brks));
43842 seq_printf(m, "\nReads: %d Bytes: %lld",
43843- atomic_read(&tcon->num_reads),
43844+ atomic_read_unchecked(&tcon->num_reads),
43845 (long long)(tcon->bytes_read));
43846 seq_printf(m, "\nWrites: %d Bytes: %lld",
43847- atomic_read(&tcon->num_writes),
43848+ atomic_read_unchecked(&tcon->num_writes),
43849 (long long)(tcon->bytes_written));
43850 seq_printf(m, "\nFlushes: %d",
43851- atomic_read(&tcon->num_flushes));
43852+ atomic_read_unchecked(&tcon->num_flushes));
43853 seq_printf(m, "\nLocks: %d HardLinks: %d "
43854 "Symlinks: %d",
43855- atomic_read(&tcon->num_locks),
43856- atomic_read(&tcon->num_hardlinks),
43857- atomic_read(&tcon->num_symlinks));
43858+ atomic_read_unchecked(&tcon->num_locks),
43859+ atomic_read_unchecked(&tcon->num_hardlinks),
43860+ atomic_read_unchecked(&tcon->num_symlinks));
43861 seq_printf(m, "\nOpens: %d Closes: %d "
43862 "Deletes: %d",
43863- atomic_read(&tcon->num_opens),
43864- atomic_read(&tcon->num_closes),
43865- atomic_read(&tcon->num_deletes));
43866+ atomic_read_unchecked(&tcon->num_opens),
43867+ atomic_read_unchecked(&tcon->num_closes),
43868+ atomic_read_unchecked(&tcon->num_deletes));
43869 seq_printf(m, "\nPosix Opens: %d "
43870 "Posix Mkdirs: %d",
43871- atomic_read(&tcon->num_posixopens),
43872- atomic_read(&tcon->num_posixmkdirs));
43873+ atomic_read_unchecked(&tcon->num_posixopens),
43874+ atomic_read_unchecked(&tcon->num_posixmkdirs));
43875 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
43876- atomic_read(&tcon->num_mkdirs),
43877- atomic_read(&tcon->num_rmdirs));
43878+ atomic_read_unchecked(&tcon->num_mkdirs),
43879+ atomic_read_unchecked(&tcon->num_rmdirs));
43880 seq_printf(m, "\nRenames: %d T2 Renames %d",
43881- atomic_read(&tcon->num_renames),
43882- atomic_read(&tcon->num_t2renames));
43883+ atomic_read_unchecked(&tcon->num_renames),
43884+ atomic_read_unchecked(&tcon->num_t2renames));
43885 seq_printf(m, "\nFindFirst: %d FNext %d "
43886 "FClose %d",
43887- atomic_read(&tcon->num_ffirst),
43888- atomic_read(&tcon->num_fnext),
43889- atomic_read(&tcon->num_fclose));
43890+ atomic_read_unchecked(&tcon->num_ffirst),
43891+ atomic_read_unchecked(&tcon->num_fnext),
43892+ atomic_read_unchecked(&tcon->num_fclose));
43893 }
43894 }
43895 }
43896diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
43897index 8b6e344..303a662 100644
43898--- a/fs/cifs/cifsfs.c
43899+++ b/fs/cifs/cifsfs.c
43900@@ -994,7 +994,7 @@ cifs_init_request_bufs(void)
43901 cifs_req_cachep = kmem_cache_create("cifs_request",
43902 CIFSMaxBufSize +
43903 MAX_CIFS_HDR_SIZE, 0,
43904- SLAB_HWCACHE_ALIGN, NULL);
43905+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
43906 if (cifs_req_cachep == NULL)
43907 return -ENOMEM;
43908
43909@@ -1021,7 +1021,7 @@ cifs_init_request_bufs(void)
43910 efficient to alloc 1 per page off the slab compared to 17K (5page)
43911 alloc of large cifs buffers even when page debugging is on */
43912 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
43913- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
43914+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
43915 NULL);
43916 if (cifs_sm_req_cachep == NULL) {
43917 mempool_destroy(cifs_req_poolp);
43918@@ -1106,8 +1106,8 @@ init_cifs(void)
43919 atomic_set(&bufAllocCount, 0);
43920 atomic_set(&smBufAllocCount, 0);
43921 #ifdef CONFIG_CIFS_STATS2
43922- atomic_set(&totBufAllocCount, 0);
43923- atomic_set(&totSmBufAllocCount, 0);
43924+ atomic_set_unchecked(&totBufAllocCount, 0);
43925+ atomic_set_unchecked(&totSmBufAllocCount, 0);
43926 #endif /* CONFIG_CIFS_STATS2 */
43927
43928 atomic_set(&midCount, 0);
43929diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
43930index d86ba9f..e80049d 100644
43931--- a/fs/cifs/cifsglob.h
43932+++ b/fs/cifs/cifsglob.h
43933@@ -491,28 +491,28 @@ struct cifs_tcon {
43934 __u16 Flags; /* optional support bits */
43935 enum statusEnum tidStatus;
43936 #ifdef CONFIG_CIFS_STATS
43937- atomic_t num_smbs_sent;
43938- atomic_t num_writes;
43939- atomic_t num_reads;
43940- atomic_t num_flushes;
43941- atomic_t num_oplock_brks;
43942- atomic_t num_opens;
43943- atomic_t num_closes;
43944- atomic_t num_deletes;
43945- atomic_t num_mkdirs;
43946- atomic_t num_posixopens;
43947- atomic_t num_posixmkdirs;
43948- atomic_t num_rmdirs;
43949- atomic_t num_renames;
43950- atomic_t num_t2renames;
43951- atomic_t num_ffirst;
43952- atomic_t num_fnext;
43953- atomic_t num_fclose;
43954- atomic_t num_hardlinks;
43955- atomic_t num_symlinks;
43956- atomic_t num_locks;
43957- atomic_t num_acl_get;
43958- atomic_t num_acl_set;
43959+ atomic_unchecked_t num_smbs_sent;
43960+ atomic_unchecked_t num_writes;
43961+ atomic_unchecked_t num_reads;
43962+ atomic_unchecked_t num_flushes;
43963+ atomic_unchecked_t num_oplock_brks;
43964+ atomic_unchecked_t num_opens;
43965+ atomic_unchecked_t num_closes;
43966+ atomic_unchecked_t num_deletes;
43967+ atomic_unchecked_t num_mkdirs;
43968+ atomic_unchecked_t num_posixopens;
43969+ atomic_unchecked_t num_posixmkdirs;
43970+ atomic_unchecked_t num_rmdirs;
43971+ atomic_unchecked_t num_renames;
43972+ atomic_unchecked_t num_t2renames;
43973+ atomic_unchecked_t num_ffirst;
43974+ atomic_unchecked_t num_fnext;
43975+ atomic_unchecked_t num_fclose;
43976+ atomic_unchecked_t num_hardlinks;
43977+ atomic_unchecked_t num_symlinks;
43978+ atomic_unchecked_t num_locks;
43979+ atomic_unchecked_t num_acl_get;
43980+ atomic_unchecked_t num_acl_set;
43981 #ifdef CONFIG_CIFS_STATS2
43982 unsigned long long time_writes;
43983 unsigned long long time_reads;
43984@@ -735,7 +735,7 @@ convert_delimiter(char *path, char delim)
43985 }
43986
43987 #ifdef CONFIG_CIFS_STATS
43988-#define cifs_stats_inc atomic_inc
43989+#define cifs_stats_inc atomic_inc_unchecked
43990
43991 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
43992 unsigned int bytes)
43993@@ -1093,8 +1093,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43994 /* Various Debug counters */
43995 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43996 #ifdef CONFIG_CIFS_STATS2
43997-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43998-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43999+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
44000+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
44001 #endif
44002 GLOBAL_EXTERN atomic_t smBufAllocCount;
44003 GLOBAL_EXTERN atomic_t midCount;
44004diff --git a/fs/cifs/link.c b/fs/cifs/link.c
44005index 6b0e064..94e6c3c 100644
44006--- a/fs/cifs/link.c
44007+++ b/fs/cifs/link.c
44008@@ -600,7 +600,7 @@ symlink_exit:
44009
44010 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
44011 {
44012- char *p = nd_get_link(nd);
44013+ const char *p = nd_get_link(nd);
44014 if (!IS_ERR(p))
44015 kfree(p);
44016 }
44017diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
44018index 557506a..2fd3816 100644
44019--- a/fs/cifs/misc.c
44020+++ b/fs/cifs/misc.c
44021@@ -156,7 +156,7 @@ cifs_buf_get(void)
44022 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
44023 atomic_inc(&bufAllocCount);
44024 #ifdef CONFIG_CIFS_STATS2
44025- atomic_inc(&totBufAllocCount);
44026+ atomic_inc_unchecked(&totBufAllocCount);
44027 #endif /* CONFIG_CIFS_STATS2 */
44028 }
44029
44030@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
44031 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
44032 atomic_inc(&smBufAllocCount);
44033 #ifdef CONFIG_CIFS_STATS2
44034- atomic_inc(&totSmBufAllocCount);
44035+ atomic_inc_unchecked(&totSmBufAllocCount);
44036 #endif /* CONFIG_CIFS_STATS2 */
44037
44038 }
44039diff --git a/fs/coda/cache.c b/fs/coda/cache.c
44040index 6901578..d402eb5 100644
44041--- a/fs/coda/cache.c
44042+++ b/fs/coda/cache.c
44043@@ -24,7 +24,7 @@
44044 #include "coda_linux.h"
44045 #include "coda_cache.h"
44046
44047-static atomic_t permission_epoch = ATOMIC_INIT(0);
44048+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
44049
44050 /* replace or extend an acl cache hit */
44051 void coda_cache_enter(struct inode *inode, int mask)
44052@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
44053 struct coda_inode_info *cii = ITOC(inode);
44054
44055 spin_lock(&cii->c_lock);
44056- cii->c_cached_epoch = atomic_read(&permission_epoch);
44057+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
44058 if (cii->c_uid != current_fsuid()) {
44059 cii->c_uid = current_fsuid();
44060 cii->c_cached_perm = mask;
44061@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
44062 {
44063 struct coda_inode_info *cii = ITOC(inode);
44064 spin_lock(&cii->c_lock);
44065- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
44066+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
44067 spin_unlock(&cii->c_lock);
44068 }
44069
44070 /* remove all acl caches */
44071 void coda_cache_clear_all(struct super_block *sb)
44072 {
44073- atomic_inc(&permission_epoch);
44074+ atomic_inc_unchecked(&permission_epoch);
44075 }
44076
44077
44078@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
44079 spin_lock(&cii->c_lock);
44080 hit = (mask & cii->c_cached_perm) == mask &&
44081 cii->c_uid == current_fsuid() &&
44082- cii->c_cached_epoch == atomic_read(&permission_epoch);
44083+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
44084 spin_unlock(&cii->c_lock);
44085
44086 return hit;
44087diff --git a/fs/compat.c b/fs/compat.c
44088index 1bdb350..9f28287 100644
44089--- a/fs/compat.c
44090+++ b/fs/compat.c
44091@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
44092
44093 set_fs(KERNEL_DS);
44094 /* The __user pointer cast is valid because of the set_fs() */
44095- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
44096+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
44097 set_fs(oldfs);
44098 /* truncating is ok because it's a user address */
44099 if (!ret)
44100@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
44101 goto out;
44102
44103 ret = -EINVAL;
44104- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
44105+ if (nr_segs > UIO_MAXIOV)
44106 goto out;
44107 if (nr_segs > fast_segs) {
44108 ret = -ENOMEM;
44109@@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
44110
44111 struct compat_readdir_callback {
44112 struct compat_old_linux_dirent __user *dirent;
44113+ struct file * file;
44114 int result;
44115 };
44116
44117@@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
44118 buf->result = -EOVERFLOW;
44119 return -EOVERFLOW;
44120 }
44121+
44122+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44123+ return 0;
44124+
44125 buf->result++;
44126 dirent = buf->dirent;
44127 if (!access_ok(VERIFY_WRITE, dirent,
44128@@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
44129
44130 buf.result = 0;
44131 buf.dirent = dirent;
44132+ buf.file = file;
44133
44134 error = vfs_readdir(file, compat_fillonedir, &buf);
44135 if (buf.result)
44136@@ -899,6 +905,7 @@ struct compat_linux_dirent {
44137 struct compat_getdents_callback {
44138 struct compat_linux_dirent __user *current_dir;
44139 struct compat_linux_dirent __user *previous;
44140+ struct file * file;
44141 int count;
44142 int error;
44143 };
44144@@ -920,6 +927,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
44145 buf->error = -EOVERFLOW;
44146 return -EOVERFLOW;
44147 }
44148+
44149+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44150+ return 0;
44151+
44152 dirent = buf->previous;
44153 if (dirent) {
44154 if (__put_user(offset, &dirent->d_off))
44155@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
44156 buf.previous = NULL;
44157 buf.count = count;
44158 buf.error = 0;
44159+ buf.file = file;
44160
44161 error = vfs_readdir(file, compat_filldir, &buf);
44162 if (error >= 0)
44163@@ -986,6 +998,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
44164 struct compat_getdents_callback64 {
44165 struct linux_dirent64 __user *current_dir;
44166 struct linux_dirent64 __user *previous;
44167+ struct file * file;
44168 int count;
44169 int error;
44170 };
44171@@ -1002,6 +1015,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
44172 buf->error = -EINVAL; /* only used if we fail.. */
44173 if (reclen > buf->count)
44174 return -EINVAL;
44175+
44176+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44177+ return 0;
44178+
44179 dirent = buf->previous;
44180
44181 if (dirent) {
44182@@ -1052,13 +1069,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
44183 buf.previous = NULL;
44184 buf.count = count;
44185 buf.error = 0;
44186+ buf.file = file;
44187
44188 error = vfs_readdir(file, compat_filldir64, &buf);
44189 if (error >= 0)
44190 error = buf.error;
44191 lastdirent = buf.previous;
44192 if (lastdirent) {
44193- typeof(lastdirent->d_off) d_off = file->f_pos;
44194+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
44195 if (__put_user_unaligned(d_off, &lastdirent->d_off))
44196 error = -EFAULT;
44197 else
44198diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
44199index 112e45a..b59845b 100644
44200--- a/fs/compat_binfmt_elf.c
44201+++ b/fs/compat_binfmt_elf.c
44202@@ -30,11 +30,13 @@
44203 #undef elf_phdr
44204 #undef elf_shdr
44205 #undef elf_note
44206+#undef elf_dyn
44207 #undef elf_addr_t
44208 #define elfhdr elf32_hdr
44209 #define elf_phdr elf32_phdr
44210 #define elf_shdr elf32_shdr
44211 #define elf_note elf32_note
44212+#define elf_dyn Elf32_Dyn
44213 #define elf_addr_t Elf32_Addr
44214
44215 /*
44216diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
44217index debdfe0..75d31d4 100644
44218--- a/fs/compat_ioctl.c
44219+++ b/fs/compat_ioctl.c
44220@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
44221
44222 err = get_user(palp, &up->palette);
44223 err |= get_user(length, &up->length);
44224+ if (err)
44225+ return -EFAULT;
44226
44227 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
44228 err = put_user(compat_ptr(palp), &up_native->palette);
44229@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
44230 return -EFAULT;
44231 if (__get_user(udata, &ss32->iomem_base))
44232 return -EFAULT;
44233- ss.iomem_base = compat_ptr(udata);
44234+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
44235 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
44236 __get_user(ss.port_high, &ss32->port_high))
44237 return -EFAULT;
44238@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
44239 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
44240 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
44241 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
44242- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
44243+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
44244 return -EFAULT;
44245
44246 return ioctl_preallocate(file, p);
44247@@ -1610,8 +1612,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
44248 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
44249 {
44250 unsigned int a, b;
44251- a = *(unsigned int *)p;
44252- b = *(unsigned int *)q;
44253+ a = *(const unsigned int *)p;
44254+ b = *(const unsigned int *)q;
44255 if (a > b)
44256 return 1;
44257 if (a < b)
44258diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
44259index 7e6c52d..94bc756 100644
44260--- a/fs/configfs/dir.c
44261+++ b/fs/configfs/dir.c
44262@@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
44263 }
44264 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
44265 struct configfs_dirent *next;
44266- const char * name;
44267+ const unsigned char * name;
44268+ char d_name[sizeof(next->s_dentry->d_iname)];
44269 int len;
44270 struct inode *inode = NULL;
44271
44272@@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
44273 continue;
44274
44275 name = configfs_get_name(next);
44276- len = strlen(name);
44277+ if (next->s_dentry && name == next->s_dentry->d_iname) {
44278+ len = next->s_dentry->d_name.len;
44279+ memcpy(d_name, name, len);
44280+ name = d_name;
44281+ } else
44282+ len = strlen(name);
44283
44284 /*
44285 * We'll have a dentry and an inode for
44286diff --git a/fs/dcache.c b/fs/dcache.c
44287index 4046904..5e31505 100644
44288--- a/fs/dcache.c
44289+++ b/fs/dcache.c
44290@@ -3154,7 +3154,7 @@ void __init vfs_caches_init(unsigned long mempages)
44291 mempages -= reserve;
44292
44293 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
44294- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
44295+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
44296
44297 dcache_init();
44298 inode_init();
44299diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
44300index b80bc84..0d46d1a 100644
44301--- a/fs/debugfs/inode.c
44302+++ b/fs/debugfs/inode.c
44303@@ -408,7 +408,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
44304 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
44305 {
44306 return debugfs_create_file(name,
44307+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
44308+ S_IFDIR | S_IRWXU,
44309+#else
44310 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
44311+#endif
44312 parent, NULL, NULL);
44313 }
44314 EXPORT_SYMBOL_GPL(debugfs_create_dir);
44315diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
44316index a07441a..5c47fa2 100644
44317--- a/fs/ecryptfs/inode.c
44318+++ b/fs/ecryptfs/inode.c
44319@@ -621,6 +621,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
44320 struct dentry *lower_old_dir_dentry;
44321 struct dentry *lower_new_dir_dentry;
44322 struct dentry *trap = NULL;
44323+ struct inode *target_inode;
44324
44325 lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
44326 lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
44327@@ -628,6 +629,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
44328 dget(lower_new_dentry);
44329 lower_old_dir_dentry = dget_parent(lower_old_dentry);
44330 lower_new_dir_dentry = dget_parent(lower_new_dentry);
44331+ target_inode = new_dentry->d_inode;
44332 trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
44333 /* source should not be ancestor of target */
44334 if (trap == lower_old_dentry) {
44335@@ -643,6 +645,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
44336 lower_new_dir_dentry->d_inode, lower_new_dentry);
44337 if (rc)
44338 goto out_lock;
44339+ if (target_inode)
44340+ fsstack_copy_attr_all(target_inode,
44341+ ecryptfs_inode_to_lower(target_inode));
44342 fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
44343 if (new_dir != old_dir)
44344 fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
44345@@ -671,7 +676,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
44346 old_fs = get_fs();
44347 set_fs(get_ds());
44348 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
44349- (char __user *)lower_buf,
44350+ (char __force_user *)lower_buf,
44351 PATH_MAX);
44352 set_fs(old_fs);
44353 if (rc < 0)
44354@@ -703,7 +708,7 @@ out:
44355 static void
44356 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
44357 {
44358- char *buf = nd_get_link(nd);
44359+ const char *buf = nd_get_link(nd);
44360 if (!IS_ERR(buf)) {
44361 /* Free the char* */
44362 kfree(buf);
44363diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
44364index c0038f6..47ab347 100644
44365--- a/fs/ecryptfs/miscdev.c
44366+++ b/fs/ecryptfs/miscdev.c
44367@@ -355,7 +355,7 @@ check_list:
44368 goto out_unlock_msg_ctx;
44369 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
44370 if (msg_ctx->msg) {
44371- if (copy_to_user(&buf[i], packet_length, packet_length_size))
44372+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
44373 goto out_unlock_msg_ctx;
44374 i += packet_length_size;
44375 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
44376diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
44377index b2a34a1..162fa69 100644
44378--- a/fs/ecryptfs/read_write.c
44379+++ b/fs/ecryptfs/read_write.c
44380@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
44381 return -EIO;
44382 fs_save = get_fs();
44383 set_fs(get_ds());
44384- rc = vfs_write(lower_file, data, size, &offset);
44385+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
44386 set_fs(fs_save);
44387 mark_inode_dirty_sync(ecryptfs_inode);
44388 return rc;
44389@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
44390 return -EIO;
44391 fs_save = get_fs();
44392 set_fs(get_ds());
44393- rc = vfs_read(lower_file, data, size, &offset);
44394+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
44395 set_fs(fs_save);
44396 return rc;
44397 }
44398diff --git a/fs/eventpoll.c b/fs/eventpoll.c
44399index 1c8b556..eedec84 100644
44400--- a/fs/eventpoll.c
44401+++ b/fs/eventpoll.c
44402@@ -1654,8 +1654,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
44403 error = PTR_ERR(file);
44404 goto out_free_fd;
44405 }
44406- fd_install(fd, file);
44407 ep->file = file;
44408+ fd_install(fd, file);
44409 return fd;
44410
44411 out_free_fd:
44412diff --git a/fs/exec.c b/fs/exec.c
44413index e95aeed..a943469 100644
44414--- a/fs/exec.c
44415+++ b/fs/exec.c
44416@@ -55,6 +55,15 @@
44417 #include <linux/pipe_fs_i.h>
44418 #include <linux/oom.h>
44419 #include <linux/compat.h>
44420+#include <linux/random.h>
44421+#include <linux/seq_file.h>
44422+
44423+#ifdef CONFIG_PAX_REFCOUNT
44424+#include <linux/kallsyms.h>
44425+#include <linux/kdebug.h>
44426+#endif
44427+
44428+#include <trace/events/fs.h>
44429
44430 #include <asm/uaccess.h>
44431 #include <asm/mmu_context.h>
44432@@ -66,6 +75,18 @@
44433
44434 #include <trace/events/sched.h>
44435
44436+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
44437+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
44438+{
44439+ WARN_ONCE(1, "PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
44440+}
44441+#endif
44442+
44443+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
44444+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
44445+EXPORT_SYMBOL(pax_set_initial_flags_func);
44446+#endif
44447+
44448 int core_uses_pid;
44449 char core_pattern[CORENAME_MAX_SIZE] = "core";
44450 unsigned int core_pipe_limit;
44451@@ -75,7 +96,7 @@ struct core_name {
44452 char *corename;
44453 int used, size;
44454 };
44455-static atomic_t call_count = ATOMIC_INIT(1);
44456+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
44457
44458 /* The maximal length of core_pattern is also specified in sysctl.c */
44459
44460@@ -191,18 +212,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
44461 int write)
44462 {
44463 struct page *page;
44464- int ret;
44465
44466-#ifdef CONFIG_STACK_GROWSUP
44467- if (write) {
44468- ret = expand_downwards(bprm->vma, pos);
44469- if (ret < 0)
44470- return NULL;
44471- }
44472-#endif
44473- ret = get_user_pages(current, bprm->mm, pos,
44474- 1, write, 1, &page, NULL);
44475- if (ret <= 0)
44476+ if (0 > expand_downwards(bprm->vma, pos))
44477+ return NULL;
44478+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
44479 return NULL;
44480
44481 if (write) {
44482@@ -218,6 +231,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
44483 if (size <= ARG_MAX)
44484 return page;
44485
44486+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44487+ // only allow 512KB for argv+env on suid/sgid binaries
44488+ // to prevent easy ASLR exhaustion
44489+ if (((bprm->cred->euid != current_euid()) ||
44490+ (bprm->cred->egid != current_egid())) &&
44491+ (size > (512 * 1024))) {
44492+ put_page(page);
44493+ return NULL;
44494+ }
44495+#endif
44496+
44497 /*
44498 * Limit to 1/4-th the stack size for the argv+env strings.
44499 * This ensures that:
44500@@ -277,6 +301,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44501 vma->vm_end = STACK_TOP_MAX;
44502 vma->vm_start = vma->vm_end - PAGE_SIZE;
44503 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
44504+
44505+#ifdef CONFIG_PAX_SEGMEXEC
44506+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
44507+#endif
44508+
44509 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
44510 INIT_LIST_HEAD(&vma->anon_vma_chain);
44511
44512@@ -287,6 +316,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44513 mm->stack_vm = mm->total_vm = 1;
44514 up_write(&mm->mmap_sem);
44515 bprm->p = vma->vm_end - sizeof(void *);
44516+
44517+#ifdef CONFIG_PAX_RANDUSTACK
44518+ if (randomize_va_space)
44519+ bprm->p ^= random32() & ~PAGE_MASK;
44520+#endif
44521+
44522 return 0;
44523 err:
44524 up_write(&mm->mmap_sem);
44525@@ -395,19 +430,7 @@ err:
44526 return err;
44527 }
44528
44529-struct user_arg_ptr {
44530-#ifdef CONFIG_COMPAT
44531- bool is_compat;
44532-#endif
44533- union {
44534- const char __user *const __user *native;
44535-#ifdef CONFIG_COMPAT
44536- compat_uptr_t __user *compat;
44537-#endif
44538- } ptr;
44539-};
44540-
44541-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44542+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44543 {
44544 const char __user *native;
44545
44546@@ -416,14 +439,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44547 compat_uptr_t compat;
44548
44549 if (get_user(compat, argv.ptr.compat + nr))
44550- return ERR_PTR(-EFAULT);
44551+ return (const char __force_user *)ERR_PTR(-EFAULT);
44552
44553 return compat_ptr(compat);
44554 }
44555 #endif
44556
44557 if (get_user(native, argv.ptr.native + nr))
44558- return ERR_PTR(-EFAULT);
44559+ return (const char __force_user *)ERR_PTR(-EFAULT);
44560
44561 return native;
44562 }
44563@@ -442,7 +465,7 @@ static int count(struct user_arg_ptr argv, int max)
44564 if (!p)
44565 break;
44566
44567- if (IS_ERR(p))
44568+ if (IS_ERR((const char __force_kernel *)p))
44569 return -EFAULT;
44570
44571 if (i++ >= max)
44572@@ -476,7 +499,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
44573
44574 ret = -EFAULT;
44575 str = get_user_arg_ptr(argv, argc);
44576- if (IS_ERR(str))
44577+ if (IS_ERR((const char __force_kernel *)str))
44578 goto out;
44579
44580 len = strnlen_user(str, MAX_ARG_STRLEN);
44581@@ -558,7 +581,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
44582 int r;
44583 mm_segment_t oldfs = get_fs();
44584 struct user_arg_ptr argv = {
44585- .ptr.native = (const char __user *const __user *)__argv,
44586+ .ptr.native = (const char __force_user *const __force_user *)__argv,
44587 };
44588
44589 set_fs(KERNEL_DS);
44590@@ -593,7 +616,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44591 unsigned long new_end = old_end - shift;
44592 struct mmu_gather tlb;
44593
44594- BUG_ON(new_start > new_end);
44595+ if (new_start >= new_end || new_start < mmap_min_addr)
44596+ return -ENOMEM;
44597
44598 /*
44599 * ensure there are no vmas between where we want to go
44600@@ -602,6 +626,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44601 if (vma != find_vma(mm, new_start))
44602 return -EFAULT;
44603
44604+#ifdef CONFIG_PAX_SEGMEXEC
44605+ BUG_ON(pax_find_mirror_vma(vma));
44606+#endif
44607+
44608 /*
44609 * cover the whole range: [new_start, old_end)
44610 */
44611@@ -682,10 +710,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44612 stack_top = arch_align_stack(stack_top);
44613 stack_top = PAGE_ALIGN(stack_top);
44614
44615- if (unlikely(stack_top < mmap_min_addr) ||
44616- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
44617- return -ENOMEM;
44618-
44619 stack_shift = vma->vm_end - stack_top;
44620
44621 bprm->p -= stack_shift;
44622@@ -697,8 +721,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
44623 bprm->exec -= stack_shift;
44624
44625 down_write(&mm->mmap_sem);
44626+
44627+ /* Move stack pages down in memory. */
44628+ if (stack_shift) {
44629+ ret = shift_arg_pages(vma, stack_shift);
44630+ if (ret)
44631+ goto out_unlock;
44632+ }
44633+
44634 vm_flags = VM_STACK_FLAGS;
44635
44636+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44637+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44638+ vm_flags &= ~VM_EXEC;
44639+
44640+#ifdef CONFIG_PAX_MPROTECT
44641+ if (mm->pax_flags & MF_PAX_MPROTECT)
44642+ vm_flags &= ~VM_MAYEXEC;
44643+#endif
44644+
44645+ }
44646+#endif
44647+
44648 /*
44649 * Adjust stack execute permissions; explicitly enable for
44650 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
44651@@ -717,13 +761,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44652 goto out_unlock;
44653 BUG_ON(prev != vma);
44654
44655- /* Move stack pages down in memory. */
44656- if (stack_shift) {
44657- ret = shift_arg_pages(vma, stack_shift);
44658- if (ret)
44659- goto out_unlock;
44660- }
44661-
44662 /* mprotect_fixup is overkill to remove the temporary stack flags */
44663 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
44664
44665@@ -781,6 +818,8 @@ struct file *open_exec(const char *name)
44666
44667 fsnotify_open(file);
44668
44669+ trace_open_exec(name);
44670+
44671 err = deny_write_access(file);
44672 if (err)
44673 goto exit;
44674@@ -804,7 +843,7 @@ int kernel_read(struct file *file, loff_t offset,
44675 old_fs = get_fs();
44676 set_fs(get_ds());
44677 /* The cast to a user pointer is valid due to the set_fs() */
44678- result = vfs_read(file, (void __user *)addr, count, &pos);
44679+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
44680 set_fs(old_fs);
44681 return result;
44682 }
44683@@ -1257,7 +1296,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
44684 }
44685 rcu_read_unlock();
44686
44687- if (p->fs->users > n_fs) {
44688+ if (atomic_read(&p->fs->users) > n_fs) {
44689 bprm->unsafe |= LSM_UNSAFE_SHARE;
44690 } else {
44691 res = -EAGAIN;
44692@@ -1460,6 +1499,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
44693
44694 EXPORT_SYMBOL(search_binary_handler);
44695
44696+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44697+static DEFINE_PER_CPU(u64, exec_counter);
44698+static int __init init_exec_counters(void)
44699+{
44700+ unsigned int cpu;
44701+
44702+ for_each_possible_cpu(cpu) {
44703+ per_cpu(exec_counter, cpu) = (u64)cpu;
44704+ }
44705+
44706+ return 0;
44707+}
44708+early_initcall(init_exec_counters);
44709+static inline void increment_exec_counter(void)
44710+{
44711+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
44712+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
44713+}
44714+#else
44715+static inline void increment_exec_counter(void) {}
44716+#endif
44717+
44718 /*
44719 * sys_execve() executes a new program.
44720 */
44721@@ -1468,6 +1529,11 @@ static int do_execve_common(const char *filename,
44722 struct user_arg_ptr envp,
44723 struct pt_regs *regs)
44724 {
44725+#ifdef CONFIG_GRKERNSEC
44726+ struct file *old_exec_file;
44727+ struct acl_subject_label *old_acl;
44728+ struct rlimit old_rlim[RLIM_NLIMITS];
44729+#endif
44730 struct linux_binprm *bprm;
44731 struct file *file;
44732 struct files_struct *displaced;
44733@@ -1475,6 +1541,8 @@ static int do_execve_common(const char *filename,
44734 int retval;
44735 const struct cred *cred = current_cred();
44736
44737+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
44738+
44739 /*
44740 * We move the actual failure in case of RLIMIT_NPROC excess from
44741 * set*uid() to execve() because too many poorly written programs
44742@@ -1515,12 +1583,27 @@ static int do_execve_common(const char *filename,
44743 if (IS_ERR(file))
44744 goto out_unmark;
44745
44746+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
44747+ retval = -EPERM;
44748+ goto out_file;
44749+ }
44750+
44751 sched_exec();
44752
44753 bprm->file = file;
44754 bprm->filename = filename;
44755 bprm->interp = filename;
44756
44757+ if (gr_process_user_ban()) {
44758+ retval = -EPERM;
44759+ goto out_file;
44760+ }
44761+
44762+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
44763+ retval = -EACCES;
44764+ goto out_file;
44765+ }
44766+
44767 retval = bprm_mm_init(bprm);
44768 if (retval)
44769 goto out_file;
44770@@ -1537,24 +1620,65 @@ static int do_execve_common(const char *filename,
44771 if (retval < 0)
44772 goto out;
44773
44774+#ifdef CONFIG_GRKERNSEC
44775+ old_acl = current->acl;
44776+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
44777+ old_exec_file = current->exec_file;
44778+ get_file(file);
44779+ current->exec_file = file;
44780+#endif
44781+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44782+ /* limit suid stack to 8MB
44783+ we saved the old limits above and will restore them if this exec fails
44784+ */
44785+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
44786+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
44787+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
44788+#endif
44789+
44790+ if (!gr_tpe_allow(file)) {
44791+ retval = -EACCES;
44792+ goto out_fail;
44793+ }
44794+
44795+ if (gr_check_crash_exec(file)) {
44796+ retval = -EACCES;
44797+ goto out_fail;
44798+ }
44799+
44800+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
44801+ bprm->unsafe);
44802+ if (retval < 0)
44803+ goto out_fail;
44804+
44805 retval = copy_strings_kernel(1, &bprm->filename, bprm);
44806 if (retval < 0)
44807- goto out;
44808+ goto out_fail;
44809
44810 bprm->exec = bprm->p;
44811 retval = copy_strings(bprm->envc, envp, bprm);
44812 if (retval < 0)
44813- goto out;
44814+ goto out_fail;
44815
44816 retval = copy_strings(bprm->argc, argv, bprm);
44817 if (retval < 0)
44818- goto out;
44819+ goto out_fail;
44820+
44821+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
44822+
44823+ gr_handle_exec_args(bprm, argv);
44824
44825 retval = search_binary_handler(bprm,regs);
44826 if (retval < 0)
44827- goto out;
44828+ goto out_fail;
44829+#ifdef CONFIG_GRKERNSEC
44830+ if (old_exec_file)
44831+ fput(old_exec_file);
44832+#endif
44833
44834 /* execve succeeded */
44835+
44836+ increment_exec_counter();
44837 current->fs->in_exec = 0;
44838 current->in_execve = 0;
44839 acct_update_integrals(current);
44840@@ -1563,6 +1687,14 @@ static int do_execve_common(const char *filename,
44841 put_files_struct(displaced);
44842 return retval;
44843
44844+out_fail:
44845+#ifdef CONFIG_GRKERNSEC
44846+ current->acl = old_acl;
44847+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
44848+ fput(current->exec_file);
44849+ current->exec_file = old_exec_file;
44850+#endif
44851+
44852 out:
44853 if (bprm->mm) {
44854 acct_arg_size(bprm, 0);
44855@@ -1636,7 +1768,7 @@ static int expand_corename(struct core_name *cn)
44856 {
44857 char *old_corename = cn->corename;
44858
44859- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
44860+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
44861 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
44862
44863 if (!cn->corename) {
44864@@ -1733,7 +1865,7 @@ static int format_corename(struct core_name *cn, long signr)
44865 int pid_in_pattern = 0;
44866 int err = 0;
44867
44868- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
44869+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
44870 cn->corename = kmalloc(cn->size, GFP_KERNEL);
44871 cn->used = 0;
44872
44873@@ -1830,6 +1962,250 @@ out:
44874 return ispipe;
44875 }
44876
44877+int pax_check_flags(unsigned long *flags)
44878+{
44879+ int retval = 0;
44880+
44881+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
44882+ if (*flags & MF_PAX_SEGMEXEC)
44883+ {
44884+ *flags &= ~MF_PAX_SEGMEXEC;
44885+ retval = -EINVAL;
44886+ }
44887+#endif
44888+
44889+ if ((*flags & MF_PAX_PAGEEXEC)
44890+
44891+#ifdef CONFIG_PAX_PAGEEXEC
44892+ && (*flags & MF_PAX_SEGMEXEC)
44893+#endif
44894+
44895+ )
44896+ {
44897+ *flags &= ~MF_PAX_PAGEEXEC;
44898+ retval = -EINVAL;
44899+ }
44900+
44901+ if ((*flags & MF_PAX_MPROTECT)
44902+
44903+#ifdef CONFIG_PAX_MPROTECT
44904+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44905+#endif
44906+
44907+ )
44908+ {
44909+ *flags &= ~MF_PAX_MPROTECT;
44910+ retval = -EINVAL;
44911+ }
44912+
44913+ if ((*flags & MF_PAX_EMUTRAMP)
44914+
44915+#ifdef CONFIG_PAX_EMUTRAMP
44916+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44917+#endif
44918+
44919+ )
44920+ {
44921+ *flags &= ~MF_PAX_EMUTRAMP;
44922+ retval = -EINVAL;
44923+ }
44924+
44925+ return retval;
44926+}
44927+
44928+EXPORT_SYMBOL(pax_check_flags);
44929+
44930+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44931+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
44932+{
44933+ struct task_struct *tsk = current;
44934+ struct mm_struct *mm = current->mm;
44935+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
44936+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
44937+ char *path_exec = NULL;
44938+ char *path_fault = NULL;
44939+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
44940+
44941+ if (buffer_exec && buffer_fault) {
44942+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
44943+
44944+ down_read(&mm->mmap_sem);
44945+ vma = mm->mmap;
44946+ while (vma && (!vma_exec || !vma_fault)) {
44947+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
44948+ vma_exec = vma;
44949+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
44950+ vma_fault = vma;
44951+ vma = vma->vm_next;
44952+ }
44953+ if (vma_exec) {
44954+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
44955+ if (IS_ERR(path_exec))
44956+ path_exec = "<path too long>";
44957+ else {
44958+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
44959+ if (path_exec) {
44960+ *path_exec = 0;
44961+ path_exec = buffer_exec;
44962+ } else
44963+ path_exec = "<path too long>";
44964+ }
44965+ }
44966+ if (vma_fault) {
44967+ start = vma_fault->vm_start;
44968+ end = vma_fault->vm_end;
44969+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
44970+ if (vma_fault->vm_file) {
44971+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
44972+ if (IS_ERR(path_fault))
44973+ path_fault = "<path too long>";
44974+ else {
44975+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
44976+ if (path_fault) {
44977+ *path_fault = 0;
44978+ path_fault = buffer_fault;
44979+ } else
44980+ path_fault = "<path too long>";
44981+ }
44982+ } else
44983+ path_fault = "<anonymous mapping>";
44984+ }
44985+ up_read(&mm->mmap_sem);
44986+ }
44987+ if (tsk->signal->curr_ip)
44988+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
44989+ else
44990+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
44991+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
44992+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
44993+ task_uid(tsk), task_euid(tsk), pc, sp);
44994+ free_page((unsigned long)buffer_exec);
44995+ free_page((unsigned long)buffer_fault);
44996+ pax_report_insns(regs, pc, sp);
44997+ do_coredump(SIGKILL, SIGKILL, regs);
44998+}
44999+#endif
45000+
45001+#ifdef CONFIG_PAX_REFCOUNT
45002+void pax_report_refcount_overflow(struct pt_regs *regs)
45003+{
45004+ if (current->signal->curr_ip)
45005+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
45006+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
45007+ else
45008+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
45009+ current->comm, task_pid_nr(current), current_uid(), current_euid());
45010+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
45011+ show_regs(regs);
45012+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
45013+}
45014+#endif
45015+
45016+#ifdef CONFIG_PAX_USERCOPY
45017+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
45018+static noinline int check_stack_object(const void *obj, unsigned long len)
45019+{
45020+ const void * const stack = task_stack_page(current);
45021+ const void * const stackend = stack + THREAD_SIZE;
45022+
45023+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
45024+ const void *frame = NULL;
45025+ const void *oldframe;
45026+#endif
45027+
45028+ if (obj + len < obj)
45029+ return -1;
45030+
45031+ if (obj + len <= stack || stackend <= obj)
45032+ return 0;
45033+
45034+ if (obj < stack || stackend < obj + len)
45035+ return -1;
45036+
45037+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
45038+ oldframe = __builtin_frame_address(1);
45039+ if (oldframe)
45040+ frame = __builtin_frame_address(2);
45041+ /*
45042+ low ----------------------------------------------> high
45043+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
45044+ ^----------------^
45045+ allow copies only within here
45046+ */
45047+ while (stack <= frame && frame < stackend) {
45048+ /* if obj + len extends past the last frame, this
45049+ check won't pass and the next frame will be 0,
45050+ causing us to bail out and correctly report
45051+ the copy as invalid
45052+ */
45053+ if (obj + len <= frame)
45054+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
45055+ oldframe = frame;
45056+ frame = *(const void * const *)frame;
45057+ }
45058+ return -1;
45059+#else
45060+ return 1;
45061+#endif
45062+}
45063+
45064+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
45065+{
45066+ if (current->signal->curr_ip)
45067+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
45068+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
45069+ else
45070+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
45071+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
45072+ dump_stack();
45073+ gr_handle_kernel_exploit();
45074+ do_group_exit(SIGKILL);
45075+}
45076+#endif
45077+
45078+void check_object_size(const void *ptr, unsigned long n, bool to)
45079+{
45080+
45081+#ifdef CONFIG_PAX_USERCOPY
45082+ const char *type;
45083+
45084+ if (!n)
45085+ return;
45086+
45087+ type = check_heap_object(ptr, n, to);
45088+ if (!type) {
45089+ if (check_stack_object(ptr, n) != -1)
45090+ return;
45091+ type = "<process stack>";
45092+ }
45093+
45094+ pax_report_usercopy(ptr, n, to, type);
45095+#endif
45096+
45097+}
45098+EXPORT_SYMBOL(check_object_size);
45099+
45100+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
45101+void pax_track_stack(void)
45102+{
45103+ unsigned long sp = (unsigned long)&sp;
45104+ if (sp < current_thread_info()->lowest_stack &&
45105+ sp > (unsigned long)task_stack_page(current))
45106+ current_thread_info()->lowest_stack = sp;
45107+}
45108+EXPORT_SYMBOL(pax_track_stack);
45109+#endif
45110+
45111+#ifdef CONFIG_PAX_SIZE_OVERFLOW
45112+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
45113+{
45114+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
45115+ dump_stack();
45116+ do_group_exit(SIGKILL);
45117+}
45118+EXPORT_SYMBOL(report_size_overflow);
45119+#endif
45120+
45121 static int zap_process(struct task_struct *start, int exit_code)
45122 {
45123 struct task_struct *t;
45124@@ -2002,17 +2378,17 @@ static void coredump_finish(struct mm_struct *mm)
45125 void set_dumpable(struct mm_struct *mm, int value)
45126 {
45127 switch (value) {
45128- case 0:
45129+ case SUID_DUMPABLE_DISABLED:
45130 clear_bit(MMF_DUMPABLE, &mm->flags);
45131 smp_wmb();
45132 clear_bit(MMF_DUMP_SECURELY, &mm->flags);
45133 break;
45134- case 1:
45135+ case SUID_DUMPABLE_ENABLED:
45136 set_bit(MMF_DUMPABLE, &mm->flags);
45137 smp_wmb();
45138 clear_bit(MMF_DUMP_SECURELY, &mm->flags);
45139 break;
45140- case 2:
45141+ case SUID_DUMPABLE_SAFE:
45142 set_bit(MMF_DUMP_SECURELY, &mm->flags);
45143 smp_wmb();
45144 set_bit(MMF_DUMPABLE, &mm->flags);
45145@@ -2025,7 +2401,7 @@ static int __get_dumpable(unsigned long mm_flags)
45146 int ret;
45147
45148 ret = mm_flags & MMF_DUMPABLE_MASK;
45149- return (ret >= 2) ? 2 : ret;
45150+ return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret;
45151 }
45152
45153 int get_dumpable(struct mm_struct *mm)
45154@@ -2040,17 +2416,17 @@ static void wait_for_dump_helpers(struct file *file)
45155 pipe = file->f_path.dentry->d_inode->i_pipe;
45156
45157 pipe_lock(pipe);
45158- pipe->readers++;
45159- pipe->writers--;
45160+ atomic_inc(&pipe->readers);
45161+ atomic_dec(&pipe->writers);
45162
45163- while ((pipe->readers > 1) && (!signal_pending(current))) {
45164+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
45165 wake_up_interruptible_sync(&pipe->wait);
45166 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45167 pipe_wait(pipe);
45168 }
45169
45170- pipe->readers--;
45171- pipe->writers++;
45172+ atomic_dec(&pipe->readers);
45173+ atomic_inc(&pipe->writers);
45174 pipe_unlock(pipe);
45175
45176 }
45177@@ -2111,7 +2487,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45178 int retval = 0;
45179 int flag = 0;
45180 int ispipe;
45181- static atomic_t core_dump_count = ATOMIC_INIT(0);
45182+ bool need_nonrelative = false;
45183+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
45184 struct coredump_params cprm = {
45185 .signr = signr,
45186 .regs = regs,
45187@@ -2126,6 +2503,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45188
45189 audit_core_dumps(signr);
45190
45191+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
45192+ gr_handle_brute_attach(current, cprm.mm_flags);
45193+
45194 binfmt = mm->binfmt;
45195 if (!binfmt || !binfmt->core_dump)
45196 goto fail;
45197@@ -2136,14 +2516,16 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45198 if (!cred)
45199 goto fail;
45200 /*
45201- * We cannot trust fsuid as being the "true" uid of the
45202- * process nor do we know its entire history. We only know it
45203- * was tainted so we dump it as root in mode 2.
45204+ * We cannot trust fsuid as being the "true" uid of the process
45205+ * nor do we know its entire history. We only know it was tainted
45206+ * so we dump it as root in mode 2, and only into a controlled
45207+ * environment (pipe handler or fully qualified path).
45208 */
45209- if (__get_dumpable(cprm.mm_flags) == 2) {
45210+ if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) {
45211 /* Setuid core dump mode */
45212 flag = O_EXCL; /* Stop rewrite attacks */
45213 cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */
45214+ need_nonrelative = true;
45215 }
45216
45217 retval = coredump_wait(exit_code, &core_state);
45218@@ -2193,7 +2575,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45219 }
45220 cprm.limit = RLIM_INFINITY;
45221
45222- dump_count = atomic_inc_return(&core_dump_count);
45223+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
45224 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
45225 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
45226 task_tgid_vnr(current), current->comm);
45227@@ -2220,9 +2602,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45228 } else {
45229 struct inode *inode;
45230
45231+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
45232+
45233 if (cprm.limit < binfmt->min_coredump)
45234 goto fail_unlock;
45235
45236+ if (need_nonrelative && cn.corename[0] != '/') {
45237+ printk(KERN_WARNING "Pid %d(%s) can only dump core "\
45238+ "to fully qualified path!\n",
45239+ task_tgid_vnr(current), current->comm);
45240+ printk(KERN_WARNING "Skipping core dump\n");
45241+ goto fail_unlock;
45242+ }
45243+
45244 cprm.file = filp_open(cn.corename,
45245 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
45246 0600);
45247@@ -2263,7 +2655,7 @@ close_fail:
45248 filp_close(cprm.file, NULL);
45249 fail_dropcount:
45250 if (ispipe)
45251- atomic_dec(&core_dump_count);
45252+ atomic_dec_unchecked(&core_dump_count);
45253 fail_unlock:
45254 kfree(cn.corename);
45255 fail_corename:
45256@@ -2282,7 +2674,7 @@ fail:
45257 */
45258 int dump_write(struct file *file, const void *addr, int nr)
45259 {
45260- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
45261+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
45262 }
45263 EXPORT_SYMBOL(dump_write);
45264
45265diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
45266index 1c36139..cf6b350 100644
45267--- a/fs/ext2/balloc.c
45268+++ b/fs/ext2/balloc.c
45269@@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
45270
45271 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
45272 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
45273- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
45274+ if (free_blocks < root_blocks + 1 &&
45275 !uid_eq(sbi->s_resuid, current_fsuid()) &&
45276 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
45277- !in_group_p (sbi->s_resgid))) {
45278+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
45279 return 0;
45280 }
45281 return 1;
45282diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
45283index 25cd608..9ed5294 100644
45284--- a/fs/ext3/balloc.c
45285+++ b/fs/ext3/balloc.c
45286@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
45287
45288 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
45289 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
45290- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
45291+ if (free_blocks < root_blocks + 1 &&
45292 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
45293 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
45294- !in_group_p (sbi->s_resgid))) {
45295+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
45296 return 0;
45297 }
45298 return 1;
45299diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
45300index 1b50890..e56c5ad 100644
45301--- a/fs/ext4/balloc.c
45302+++ b/fs/ext4/balloc.c
45303@@ -500,8 +500,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
45304 /* Hm, nope. Are (enough) root reserved clusters available? */
45305 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
45306 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
45307- capable(CAP_SYS_RESOURCE) ||
45308- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
45309+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
45310+ capable_nolog(CAP_SYS_RESOURCE)) {
45311
45312 if (free_clusters >= (nclusters + dirty_clusters))
45313 return 1;
45314diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
45315index 01434f2..bd995b4 100644
45316--- a/fs/ext4/ext4.h
45317+++ b/fs/ext4/ext4.h
45318@@ -1246,19 +1246,19 @@ struct ext4_sb_info {
45319 unsigned long s_mb_last_start;
45320
45321 /* stats for buddy allocator */
45322- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
45323- atomic_t s_bal_success; /* we found long enough chunks */
45324- atomic_t s_bal_allocated; /* in blocks */
45325- atomic_t s_bal_ex_scanned; /* total extents scanned */
45326- atomic_t s_bal_goals; /* goal hits */
45327- atomic_t s_bal_breaks; /* too long searches */
45328- atomic_t s_bal_2orders; /* 2^order hits */
45329+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
45330+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
45331+ atomic_unchecked_t s_bal_allocated; /* in blocks */
45332+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
45333+ atomic_unchecked_t s_bal_goals; /* goal hits */
45334+ atomic_unchecked_t s_bal_breaks; /* too long searches */
45335+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
45336 spinlock_t s_bal_lock;
45337 unsigned long s_mb_buddies_generated;
45338 unsigned long long s_mb_generation_time;
45339- atomic_t s_mb_lost_chunks;
45340- atomic_t s_mb_preallocated;
45341- atomic_t s_mb_discarded;
45342+ atomic_unchecked_t s_mb_lost_chunks;
45343+ atomic_unchecked_t s_mb_preallocated;
45344+ atomic_unchecked_t s_mb_discarded;
45345 atomic_t s_lock_busy;
45346
45347 /* locality groups */
45348diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
45349index 1cd6994..5799d45 100644
45350--- a/fs/ext4/mballoc.c
45351+++ b/fs/ext4/mballoc.c
45352@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
45353 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
45354
45355 if (EXT4_SB(sb)->s_mb_stats)
45356- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
45357+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
45358
45359 break;
45360 }
45361@@ -2041,7 +2041,7 @@ repeat:
45362 ac->ac_status = AC_STATUS_CONTINUE;
45363 ac->ac_flags |= EXT4_MB_HINT_FIRST;
45364 cr = 3;
45365- atomic_inc(&sbi->s_mb_lost_chunks);
45366+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
45367 goto repeat;
45368 }
45369 }
45370@@ -2545,25 +2545,25 @@ int ext4_mb_release(struct super_block *sb)
45371 if (sbi->s_mb_stats) {
45372 ext4_msg(sb, KERN_INFO,
45373 "mballoc: %u blocks %u reqs (%u success)",
45374- atomic_read(&sbi->s_bal_allocated),
45375- atomic_read(&sbi->s_bal_reqs),
45376- atomic_read(&sbi->s_bal_success));
45377+ atomic_read_unchecked(&sbi->s_bal_allocated),
45378+ atomic_read_unchecked(&sbi->s_bal_reqs),
45379+ atomic_read_unchecked(&sbi->s_bal_success));
45380 ext4_msg(sb, KERN_INFO,
45381 "mballoc: %u extents scanned, %u goal hits, "
45382 "%u 2^N hits, %u breaks, %u lost",
45383- atomic_read(&sbi->s_bal_ex_scanned),
45384- atomic_read(&sbi->s_bal_goals),
45385- atomic_read(&sbi->s_bal_2orders),
45386- atomic_read(&sbi->s_bal_breaks),
45387- atomic_read(&sbi->s_mb_lost_chunks));
45388+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
45389+ atomic_read_unchecked(&sbi->s_bal_goals),
45390+ atomic_read_unchecked(&sbi->s_bal_2orders),
45391+ atomic_read_unchecked(&sbi->s_bal_breaks),
45392+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
45393 ext4_msg(sb, KERN_INFO,
45394 "mballoc: %lu generated and it took %Lu",
45395 sbi->s_mb_buddies_generated,
45396 sbi->s_mb_generation_time);
45397 ext4_msg(sb, KERN_INFO,
45398 "mballoc: %u preallocated, %u discarded",
45399- atomic_read(&sbi->s_mb_preallocated),
45400- atomic_read(&sbi->s_mb_discarded));
45401+ atomic_read_unchecked(&sbi->s_mb_preallocated),
45402+ atomic_read_unchecked(&sbi->s_mb_discarded));
45403 }
45404
45405 free_percpu(sbi->s_locality_groups);
45406@@ -3047,16 +3047,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
45407 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
45408
45409 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
45410- atomic_inc(&sbi->s_bal_reqs);
45411- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
45412+ atomic_inc_unchecked(&sbi->s_bal_reqs);
45413+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
45414 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
45415- atomic_inc(&sbi->s_bal_success);
45416- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
45417+ atomic_inc_unchecked(&sbi->s_bal_success);
45418+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
45419 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
45420 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
45421- atomic_inc(&sbi->s_bal_goals);
45422+ atomic_inc_unchecked(&sbi->s_bal_goals);
45423 if (ac->ac_found > sbi->s_mb_max_to_scan)
45424- atomic_inc(&sbi->s_bal_breaks);
45425+ atomic_inc_unchecked(&sbi->s_bal_breaks);
45426 }
45427
45428 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
45429@@ -3456,7 +3456,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
45430 trace_ext4_mb_new_inode_pa(ac, pa);
45431
45432 ext4_mb_use_inode_pa(ac, pa);
45433- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
45434+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
45435
45436 ei = EXT4_I(ac->ac_inode);
45437 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
45438@@ -3516,7 +3516,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
45439 trace_ext4_mb_new_group_pa(ac, pa);
45440
45441 ext4_mb_use_group_pa(ac, pa);
45442- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45443+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45444
45445 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
45446 lg = ac->ac_lg;
45447@@ -3605,7 +3605,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
45448 * from the bitmap and continue.
45449 */
45450 }
45451- atomic_add(free, &sbi->s_mb_discarded);
45452+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
45453
45454 return err;
45455 }
45456@@ -3623,7 +3623,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
45457 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
45458 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
45459 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
45460- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
45461+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
45462 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
45463
45464 return 0;
45465diff --git a/fs/fcntl.c b/fs/fcntl.c
45466index 81b70e6..d9ae6cf 100644
45467--- a/fs/fcntl.c
45468+++ b/fs/fcntl.c
45469@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
45470 if (err)
45471 return err;
45472
45473+ if (gr_handle_chroot_fowner(pid, type))
45474+ return -ENOENT;
45475+ if (gr_check_protected_task_fowner(pid, type))
45476+ return -EACCES;
45477+
45478 f_modown(filp, pid, type, force);
45479 return 0;
45480 }
45481@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
45482
45483 static int f_setown_ex(struct file *filp, unsigned long arg)
45484 {
45485- struct f_owner_ex * __user owner_p = (void * __user)arg;
45486+ struct f_owner_ex __user *owner_p = (void __user *)arg;
45487 struct f_owner_ex owner;
45488 struct pid *pid;
45489 int type;
45490@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
45491
45492 static int f_getown_ex(struct file *filp, unsigned long arg)
45493 {
45494- struct f_owner_ex * __user owner_p = (void * __user)arg;
45495+ struct f_owner_ex __user *owner_p = (void __user *)arg;
45496 struct f_owner_ex owner;
45497 int ret = 0;
45498
45499@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
45500 switch (cmd) {
45501 case F_DUPFD:
45502 case F_DUPFD_CLOEXEC:
45503+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
45504 if (arg >= rlimit(RLIMIT_NOFILE))
45505 break;
45506 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
45507diff --git a/fs/fifo.c b/fs/fifo.c
45508index cf6f434..3d7942c 100644
45509--- a/fs/fifo.c
45510+++ b/fs/fifo.c
45511@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
45512 */
45513 filp->f_op = &read_pipefifo_fops;
45514 pipe->r_counter++;
45515- if (pipe->readers++ == 0)
45516+ if (atomic_inc_return(&pipe->readers) == 1)
45517 wake_up_partner(inode);
45518
45519- if (!pipe->writers) {
45520+ if (!atomic_read(&pipe->writers)) {
45521 if ((filp->f_flags & O_NONBLOCK)) {
45522 /* suppress POLLHUP until we have
45523 * seen a writer */
45524@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
45525 * errno=ENXIO when there is no process reading the FIFO.
45526 */
45527 ret = -ENXIO;
45528- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
45529+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
45530 goto err;
45531
45532 filp->f_op = &write_pipefifo_fops;
45533 pipe->w_counter++;
45534- if (!pipe->writers++)
45535+ if (atomic_inc_return(&pipe->writers) == 1)
45536 wake_up_partner(inode);
45537
45538- if (!pipe->readers) {
45539+ if (!atomic_read(&pipe->readers)) {
45540 if (wait_for_partner(inode, &pipe->r_counter))
45541 goto err_wr;
45542 }
45543@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
45544 */
45545 filp->f_op = &rdwr_pipefifo_fops;
45546
45547- pipe->readers++;
45548- pipe->writers++;
45549+ atomic_inc(&pipe->readers);
45550+ atomic_inc(&pipe->writers);
45551 pipe->r_counter++;
45552 pipe->w_counter++;
45553- if (pipe->readers == 1 || pipe->writers == 1)
45554+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
45555 wake_up_partner(inode);
45556 break;
45557
45558@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
45559 return 0;
45560
45561 err_rd:
45562- if (!--pipe->readers)
45563+ if (atomic_dec_and_test(&pipe->readers))
45564 wake_up_interruptible(&pipe->wait);
45565 ret = -ERESTARTSYS;
45566 goto err;
45567
45568 err_wr:
45569- if (!--pipe->writers)
45570+ if (atomic_dec_and_test(&pipe->writers))
45571 wake_up_interruptible(&pipe->wait);
45572 ret = -ERESTARTSYS;
45573 goto err;
45574
45575 err:
45576- if (!pipe->readers && !pipe->writers)
45577+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
45578 free_pipe_info(inode);
45579
45580 err_nocleanup:
45581diff --git a/fs/file.c b/fs/file.c
45582index ba3f605..fade102 100644
45583--- a/fs/file.c
45584+++ b/fs/file.c
45585@@ -15,6 +15,7 @@
45586 #include <linux/slab.h>
45587 #include <linux/vmalloc.h>
45588 #include <linux/file.h>
45589+#include <linux/security.h>
45590 #include <linux/fdtable.h>
45591 #include <linux/bitops.h>
45592 #include <linux/interrupt.h>
45593@@ -255,6 +256,7 @@ int expand_files(struct files_struct *files, int nr)
45594 * N.B. For clone tasks sharing a files structure, this test
45595 * will limit the total number of files that can be opened.
45596 */
45597+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
45598 if (nr >= rlimit(RLIMIT_NOFILE))
45599 return -EMFILE;
45600
45601diff --git a/fs/filesystems.c b/fs/filesystems.c
45602index 96f2428..f5eeb8e 100644
45603--- a/fs/filesystems.c
45604+++ b/fs/filesystems.c
45605@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
45606 int len = dot ? dot - name : strlen(name);
45607
45608 fs = __get_fs_type(name, len);
45609+
45610+#ifdef CONFIG_GRKERNSEC_MODHARDEN
45611+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
45612+#else
45613 if (!fs && (request_module("%.*s", len, name) == 0))
45614+#endif
45615 fs = __get_fs_type(name, len);
45616
45617 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
45618diff --git a/fs/fs_struct.c b/fs/fs_struct.c
45619index e159e68..e7d2a6f 100644
45620--- a/fs/fs_struct.c
45621+++ b/fs/fs_struct.c
45622@@ -4,6 +4,7 @@
45623 #include <linux/path.h>
45624 #include <linux/slab.h>
45625 #include <linux/fs_struct.h>
45626+#include <linux/grsecurity.h>
45627 #include "internal.h"
45628
45629 static inline void path_get_longterm(struct path *path)
45630@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
45631 write_seqcount_begin(&fs->seq);
45632 old_root = fs->root;
45633 fs->root = *path;
45634+ gr_set_chroot_entries(current, path);
45635 write_seqcount_end(&fs->seq);
45636 spin_unlock(&fs->lock);
45637 if (old_root.dentry)
45638@@ -65,6 +67,17 @@ static inline int replace_path(struct path *p, const struct path *old, const str
45639 return 1;
45640 }
45641
45642+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
45643+{
45644+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
45645+ return 0;
45646+ *p = *new;
45647+
45648+ gr_set_chroot_entries(task, new);
45649+
45650+ return 1;
45651+}
45652+
45653 void chroot_fs_refs(struct path *old_root, struct path *new_root)
45654 {
45655 struct task_struct *g, *p;
45656@@ -79,7 +92,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
45657 int hits = 0;
45658 spin_lock(&fs->lock);
45659 write_seqcount_begin(&fs->seq);
45660- hits += replace_path(&fs->root, old_root, new_root);
45661+ hits += replace_root_path(p, &fs->root, old_root, new_root);
45662 hits += replace_path(&fs->pwd, old_root, new_root);
45663 write_seqcount_end(&fs->seq);
45664 while (hits--) {
45665@@ -111,7 +124,8 @@ void exit_fs(struct task_struct *tsk)
45666 task_lock(tsk);
45667 spin_lock(&fs->lock);
45668 tsk->fs = NULL;
45669- kill = !--fs->users;
45670+ gr_clear_chroot_entries(tsk);
45671+ kill = !atomic_dec_return(&fs->users);
45672 spin_unlock(&fs->lock);
45673 task_unlock(tsk);
45674 if (kill)
45675@@ -124,7 +138,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45676 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
45677 /* We don't need to lock fs - think why ;-) */
45678 if (fs) {
45679- fs->users = 1;
45680+ atomic_set(&fs->users, 1);
45681 fs->in_exec = 0;
45682 spin_lock_init(&fs->lock);
45683 seqcount_init(&fs->seq);
45684@@ -133,6 +147,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45685 spin_lock(&old->lock);
45686 fs->root = old->root;
45687 path_get_longterm(&fs->root);
45688+ /* instead of calling gr_set_chroot_entries here,
45689+ we call it from every caller of this function
45690+ */
45691 fs->pwd = old->pwd;
45692 path_get_longterm(&fs->pwd);
45693 spin_unlock(&old->lock);
45694@@ -151,8 +168,9 @@ int unshare_fs_struct(void)
45695
45696 task_lock(current);
45697 spin_lock(&fs->lock);
45698- kill = !--fs->users;
45699+ kill = !atomic_dec_return(&fs->users);
45700 current->fs = new_fs;
45701+ gr_set_chroot_entries(current, &new_fs->root);
45702 spin_unlock(&fs->lock);
45703 task_unlock(current);
45704
45705@@ -165,13 +183,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
45706
45707 int current_umask(void)
45708 {
45709- return current->fs->umask;
45710+ return current->fs->umask | gr_acl_umask();
45711 }
45712 EXPORT_SYMBOL(current_umask);
45713
45714 /* to be mentioned only in INIT_TASK */
45715 struct fs_struct init_fs = {
45716- .users = 1,
45717+ .users = ATOMIC_INIT(1),
45718 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
45719 .seq = SEQCNT_ZERO,
45720 .umask = 0022,
45721@@ -187,12 +205,13 @@ void daemonize_fs_struct(void)
45722 task_lock(current);
45723
45724 spin_lock(&init_fs.lock);
45725- init_fs.users++;
45726+ atomic_inc(&init_fs.users);
45727 spin_unlock(&init_fs.lock);
45728
45729 spin_lock(&fs->lock);
45730 current->fs = &init_fs;
45731- kill = !--fs->users;
45732+ gr_set_chroot_entries(current, &current->fs->root);
45733+ kill = !atomic_dec_return(&fs->users);
45734 spin_unlock(&fs->lock);
45735
45736 task_unlock(current);
45737diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
45738index 9905350..02eaec4 100644
45739--- a/fs/fscache/cookie.c
45740+++ b/fs/fscache/cookie.c
45741@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
45742 parent ? (char *) parent->def->name : "<no-parent>",
45743 def->name, netfs_data);
45744
45745- fscache_stat(&fscache_n_acquires);
45746+ fscache_stat_unchecked(&fscache_n_acquires);
45747
45748 /* if there's no parent cookie, then we don't create one here either */
45749 if (!parent) {
45750- fscache_stat(&fscache_n_acquires_null);
45751+ fscache_stat_unchecked(&fscache_n_acquires_null);
45752 _leave(" [no parent]");
45753 return NULL;
45754 }
45755@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
45756 /* allocate and initialise a cookie */
45757 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
45758 if (!cookie) {
45759- fscache_stat(&fscache_n_acquires_oom);
45760+ fscache_stat_unchecked(&fscache_n_acquires_oom);
45761 _leave(" [ENOMEM]");
45762 return NULL;
45763 }
45764@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45765
45766 switch (cookie->def->type) {
45767 case FSCACHE_COOKIE_TYPE_INDEX:
45768- fscache_stat(&fscache_n_cookie_index);
45769+ fscache_stat_unchecked(&fscache_n_cookie_index);
45770 break;
45771 case FSCACHE_COOKIE_TYPE_DATAFILE:
45772- fscache_stat(&fscache_n_cookie_data);
45773+ fscache_stat_unchecked(&fscache_n_cookie_data);
45774 break;
45775 default:
45776- fscache_stat(&fscache_n_cookie_special);
45777+ fscache_stat_unchecked(&fscache_n_cookie_special);
45778 break;
45779 }
45780
45781@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45782 if (fscache_acquire_non_index_cookie(cookie) < 0) {
45783 atomic_dec(&parent->n_children);
45784 __fscache_cookie_put(cookie);
45785- fscache_stat(&fscache_n_acquires_nobufs);
45786+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
45787 _leave(" = NULL");
45788 return NULL;
45789 }
45790 }
45791
45792- fscache_stat(&fscache_n_acquires_ok);
45793+ fscache_stat_unchecked(&fscache_n_acquires_ok);
45794 _leave(" = %p", cookie);
45795 return cookie;
45796 }
45797@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
45798 cache = fscache_select_cache_for_object(cookie->parent);
45799 if (!cache) {
45800 up_read(&fscache_addremove_sem);
45801- fscache_stat(&fscache_n_acquires_no_cache);
45802+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
45803 _leave(" = -ENOMEDIUM [no cache]");
45804 return -ENOMEDIUM;
45805 }
45806@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
45807 object = cache->ops->alloc_object(cache, cookie);
45808 fscache_stat_d(&fscache_n_cop_alloc_object);
45809 if (IS_ERR(object)) {
45810- fscache_stat(&fscache_n_object_no_alloc);
45811+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
45812 ret = PTR_ERR(object);
45813 goto error;
45814 }
45815
45816- fscache_stat(&fscache_n_object_alloc);
45817+ fscache_stat_unchecked(&fscache_n_object_alloc);
45818
45819 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
45820
45821@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
45822 struct fscache_object *object;
45823 struct hlist_node *_p;
45824
45825- fscache_stat(&fscache_n_updates);
45826+ fscache_stat_unchecked(&fscache_n_updates);
45827
45828 if (!cookie) {
45829- fscache_stat(&fscache_n_updates_null);
45830+ fscache_stat_unchecked(&fscache_n_updates_null);
45831 _leave(" [no cookie]");
45832 return;
45833 }
45834@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45835 struct fscache_object *object;
45836 unsigned long event;
45837
45838- fscache_stat(&fscache_n_relinquishes);
45839+ fscache_stat_unchecked(&fscache_n_relinquishes);
45840 if (retire)
45841- fscache_stat(&fscache_n_relinquishes_retire);
45842+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
45843
45844 if (!cookie) {
45845- fscache_stat(&fscache_n_relinquishes_null);
45846+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
45847 _leave(" [no cookie]");
45848 return;
45849 }
45850@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45851
45852 /* wait for the cookie to finish being instantiated (or to fail) */
45853 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
45854- fscache_stat(&fscache_n_relinquishes_waitcrt);
45855+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
45856 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
45857 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
45858 }
45859diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
45860index f6aad48..88dcf26 100644
45861--- a/fs/fscache/internal.h
45862+++ b/fs/fscache/internal.h
45863@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
45864 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
45865 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
45866
45867-extern atomic_t fscache_n_op_pend;
45868-extern atomic_t fscache_n_op_run;
45869-extern atomic_t fscache_n_op_enqueue;
45870-extern atomic_t fscache_n_op_deferred_release;
45871-extern atomic_t fscache_n_op_release;
45872-extern atomic_t fscache_n_op_gc;
45873-extern atomic_t fscache_n_op_cancelled;
45874-extern atomic_t fscache_n_op_rejected;
45875+extern atomic_unchecked_t fscache_n_op_pend;
45876+extern atomic_unchecked_t fscache_n_op_run;
45877+extern atomic_unchecked_t fscache_n_op_enqueue;
45878+extern atomic_unchecked_t fscache_n_op_deferred_release;
45879+extern atomic_unchecked_t fscache_n_op_release;
45880+extern atomic_unchecked_t fscache_n_op_gc;
45881+extern atomic_unchecked_t fscache_n_op_cancelled;
45882+extern atomic_unchecked_t fscache_n_op_rejected;
45883
45884-extern atomic_t fscache_n_attr_changed;
45885-extern atomic_t fscache_n_attr_changed_ok;
45886-extern atomic_t fscache_n_attr_changed_nobufs;
45887-extern atomic_t fscache_n_attr_changed_nomem;
45888-extern atomic_t fscache_n_attr_changed_calls;
45889+extern atomic_unchecked_t fscache_n_attr_changed;
45890+extern atomic_unchecked_t fscache_n_attr_changed_ok;
45891+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
45892+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
45893+extern atomic_unchecked_t fscache_n_attr_changed_calls;
45894
45895-extern atomic_t fscache_n_allocs;
45896-extern atomic_t fscache_n_allocs_ok;
45897-extern atomic_t fscache_n_allocs_wait;
45898-extern atomic_t fscache_n_allocs_nobufs;
45899-extern atomic_t fscache_n_allocs_intr;
45900-extern atomic_t fscache_n_allocs_object_dead;
45901-extern atomic_t fscache_n_alloc_ops;
45902-extern atomic_t fscache_n_alloc_op_waits;
45903+extern atomic_unchecked_t fscache_n_allocs;
45904+extern atomic_unchecked_t fscache_n_allocs_ok;
45905+extern atomic_unchecked_t fscache_n_allocs_wait;
45906+extern atomic_unchecked_t fscache_n_allocs_nobufs;
45907+extern atomic_unchecked_t fscache_n_allocs_intr;
45908+extern atomic_unchecked_t fscache_n_allocs_object_dead;
45909+extern atomic_unchecked_t fscache_n_alloc_ops;
45910+extern atomic_unchecked_t fscache_n_alloc_op_waits;
45911
45912-extern atomic_t fscache_n_retrievals;
45913-extern atomic_t fscache_n_retrievals_ok;
45914-extern atomic_t fscache_n_retrievals_wait;
45915-extern atomic_t fscache_n_retrievals_nodata;
45916-extern atomic_t fscache_n_retrievals_nobufs;
45917-extern atomic_t fscache_n_retrievals_intr;
45918-extern atomic_t fscache_n_retrievals_nomem;
45919-extern atomic_t fscache_n_retrievals_object_dead;
45920-extern atomic_t fscache_n_retrieval_ops;
45921-extern atomic_t fscache_n_retrieval_op_waits;
45922+extern atomic_unchecked_t fscache_n_retrievals;
45923+extern atomic_unchecked_t fscache_n_retrievals_ok;
45924+extern atomic_unchecked_t fscache_n_retrievals_wait;
45925+extern atomic_unchecked_t fscache_n_retrievals_nodata;
45926+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
45927+extern atomic_unchecked_t fscache_n_retrievals_intr;
45928+extern atomic_unchecked_t fscache_n_retrievals_nomem;
45929+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
45930+extern atomic_unchecked_t fscache_n_retrieval_ops;
45931+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
45932
45933-extern atomic_t fscache_n_stores;
45934-extern atomic_t fscache_n_stores_ok;
45935-extern atomic_t fscache_n_stores_again;
45936-extern atomic_t fscache_n_stores_nobufs;
45937-extern atomic_t fscache_n_stores_oom;
45938-extern atomic_t fscache_n_store_ops;
45939-extern atomic_t fscache_n_store_calls;
45940-extern atomic_t fscache_n_store_pages;
45941-extern atomic_t fscache_n_store_radix_deletes;
45942-extern atomic_t fscache_n_store_pages_over_limit;
45943+extern atomic_unchecked_t fscache_n_stores;
45944+extern atomic_unchecked_t fscache_n_stores_ok;
45945+extern atomic_unchecked_t fscache_n_stores_again;
45946+extern atomic_unchecked_t fscache_n_stores_nobufs;
45947+extern atomic_unchecked_t fscache_n_stores_oom;
45948+extern atomic_unchecked_t fscache_n_store_ops;
45949+extern atomic_unchecked_t fscache_n_store_calls;
45950+extern atomic_unchecked_t fscache_n_store_pages;
45951+extern atomic_unchecked_t fscache_n_store_radix_deletes;
45952+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
45953
45954-extern atomic_t fscache_n_store_vmscan_not_storing;
45955-extern atomic_t fscache_n_store_vmscan_gone;
45956-extern atomic_t fscache_n_store_vmscan_busy;
45957-extern atomic_t fscache_n_store_vmscan_cancelled;
45958+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45959+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
45960+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
45961+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45962
45963-extern atomic_t fscache_n_marks;
45964-extern atomic_t fscache_n_uncaches;
45965+extern atomic_unchecked_t fscache_n_marks;
45966+extern atomic_unchecked_t fscache_n_uncaches;
45967
45968-extern atomic_t fscache_n_acquires;
45969-extern atomic_t fscache_n_acquires_null;
45970-extern atomic_t fscache_n_acquires_no_cache;
45971-extern atomic_t fscache_n_acquires_ok;
45972-extern atomic_t fscache_n_acquires_nobufs;
45973-extern atomic_t fscache_n_acquires_oom;
45974+extern atomic_unchecked_t fscache_n_acquires;
45975+extern atomic_unchecked_t fscache_n_acquires_null;
45976+extern atomic_unchecked_t fscache_n_acquires_no_cache;
45977+extern atomic_unchecked_t fscache_n_acquires_ok;
45978+extern atomic_unchecked_t fscache_n_acquires_nobufs;
45979+extern atomic_unchecked_t fscache_n_acquires_oom;
45980
45981-extern atomic_t fscache_n_updates;
45982-extern atomic_t fscache_n_updates_null;
45983-extern atomic_t fscache_n_updates_run;
45984+extern atomic_unchecked_t fscache_n_updates;
45985+extern atomic_unchecked_t fscache_n_updates_null;
45986+extern atomic_unchecked_t fscache_n_updates_run;
45987
45988-extern atomic_t fscache_n_relinquishes;
45989-extern atomic_t fscache_n_relinquishes_null;
45990-extern atomic_t fscache_n_relinquishes_waitcrt;
45991-extern atomic_t fscache_n_relinquishes_retire;
45992+extern atomic_unchecked_t fscache_n_relinquishes;
45993+extern atomic_unchecked_t fscache_n_relinquishes_null;
45994+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45995+extern atomic_unchecked_t fscache_n_relinquishes_retire;
45996
45997-extern atomic_t fscache_n_cookie_index;
45998-extern atomic_t fscache_n_cookie_data;
45999-extern atomic_t fscache_n_cookie_special;
46000+extern atomic_unchecked_t fscache_n_cookie_index;
46001+extern atomic_unchecked_t fscache_n_cookie_data;
46002+extern atomic_unchecked_t fscache_n_cookie_special;
46003
46004-extern atomic_t fscache_n_object_alloc;
46005-extern atomic_t fscache_n_object_no_alloc;
46006-extern atomic_t fscache_n_object_lookups;
46007-extern atomic_t fscache_n_object_lookups_negative;
46008-extern atomic_t fscache_n_object_lookups_positive;
46009-extern atomic_t fscache_n_object_lookups_timed_out;
46010-extern atomic_t fscache_n_object_created;
46011-extern atomic_t fscache_n_object_avail;
46012-extern atomic_t fscache_n_object_dead;
46013+extern atomic_unchecked_t fscache_n_object_alloc;
46014+extern atomic_unchecked_t fscache_n_object_no_alloc;
46015+extern atomic_unchecked_t fscache_n_object_lookups;
46016+extern atomic_unchecked_t fscache_n_object_lookups_negative;
46017+extern atomic_unchecked_t fscache_n_object_lookups_positive;
46018+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
46019+extern atomic_unchecked_t fscache_n_object_created;
46020+extern atomic_unchecked_t fscache_n_object_avail;
46021+extern atomic_unchecked_t fscache_n_object_dead;
46022
46023-extern atomic_t fscache_n_checkaux_none;
46024-extern atomic_t fscache_n_checkaux_okay;
46025-extern atomic_t fscache_n_checkaux_update;
46026-extern atomic_t fscache_n_checkaux_obsolete;
46027+extern atomic_unchecked_t fscache_n_checkaux_none;
46028+extern atomic_unchecked_t fscache_n_checkaux_okay;
46029+extern atomic_unchecked_t fscache_n_checkaux_update;
46030+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
46031
46032 extern atomic_t fscache_n_cop_alloc_object;
46033 extern atomic_t fscache_n_cop_lookup_object;
46034@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
46035 atomic_inc(stat);
46036 }
46037
46038+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
46039+{
46040+ atomic_inc_unchecked(stat);
46041+}
46042+
46043 static inline void fscache_stat_d(atomic_t *stat)
46044 {
46045 atomic_dec(stat);
46046@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
46047
46048 #define __fscache_stat(stat) (NULL)
46049 #define fscache_stat(stat) do {} while (0)
46050+#define fscache_stat_unchecked(stat) do {} while (0)
46051 #define fscache_stat_d(stat) do {} while (0)
46052 #endif
46053
46054diff --git a/fs/fscache/object.c b/fs/fscache/object.c
46055index b6b897c..0ffff9c 100644
46056--- a/fs/fscache/object.c
46057+++ b/fs/fscache/object.c
46058@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46059 /* update the object metadata on disk */
46060 case FSCACHE_OBJECT_UPDATING:
46061 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
46062- fscache_stat(&fscache_n_updates_run);
46063+ fscache_stat_unchecked(&fscache_n_updates_run);
46064 fscache_stat(&fscache_n_cop_update_object);
46065 object->cache->ops->update_object(object);
46066 fscache_stat_d(&fscache_n_cop_update_object);
46067@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46068 spin_lock(&object->lock);
46069 object->state = FSCACHE_OBJECT_DEAD;
46070 spin_unlock(&object->lock);
46071- fscache_stat(&fscache_n_object_dead);
46072+ fscache_stat_unchecked(&fscache_n_object_dead);
46073 goto terminal_transit;
46074
46075 /* handle the parent cache of this object being withdrawn from
46076@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
46077 spin_lock(&object->lock);
46078 object->state = FSCACHE_OBJECT_DEAD;
46079 spin_unlock(&object->lock);
46080- fscache_stat(&fscache_n_object_dead);
46081+ fscache_stat_unchecked(&fscache_n_object_dead);
46082 goto terminal_transit;
46083
46084 /* complain about the object being woken up once it is
46085@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
46086 parent->cookie->def->name, cookie->def->name,
46087 object->cache->tag->name);
46088
46089- fscache_stat(&fscache_n_object_lookups);
46090+ fscache_stat_unchecked(&fscache_n_object_lookups);
46091 fscache_stat(&fscache_n_cop_lookup_object);
46092 ret = object->cache->ops->lookup_object(object);
46093 fscache_stat_d(&fscache_n_cop_lookup_object);
46094@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
46095 if (ret == -ETIMEDOUT) {
46096 /* probably stuck behind another object, so move this one to
46097 * the back of the queue */
46098- fscache_stat(&fscache_n_object_lookups_timed_out);
46099+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
46100 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
46101 }
46102
46103@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
46104
46105 spin_lock(&object->lock);
46106 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
46107- fscache_stat(&fscache_n_object_lookups_negative);
46108+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
46109
46110 /* transit here to allow write requests to begin stacking up
46111 * and read requests to begin returning ENODATA */
46112@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
46113 * result, in which case there may be data available */
46114 spin_lock(&object->lock);
46115 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
46116- fscache_stat(&fscache_n_object_lookups_positive);
46117+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
46118
46119 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
46120
46121@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
46122 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
46123 } else {
46124 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
46125- fscache_stat(&fscache_n_object_created);
46126+ fscache_stat_unchecked(&fscache_n_object_created);
46127
46128 object->state = FSCACHE_OBJECT_AVAILABLE;
46129 spin_unlock(&object->lock);
46130@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
46131 fscache_enqueue_dependents(object);
46132
46133 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
46134- fscache_stat(&fscache_n_object_avail);
46135+ fscache_stat_unchecked(&fscache_n_object_avail);
46136
46137 _leave("");
46138 }
46139@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
46140 enum fscache_checkaux result;
46141
46142 if (!object->cookie->def->check_aux) {
46143- fscache_stat(&fscache_n_checkaux_none);
46144+ fscache_stat_unchecked(&fscache_n_checkaux_none);
46145 return FSCACHE_CHECKAUX_OKAY;
46146 }
46147
46148@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
46149 switch (result) {
46150 /* entry okay as is */
46151 case FSCACHE_CHECKAUX_OKAY:
46152- fscache_stat(&fscache_n_checkaux_okay);
46153+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
46154 break;
46155
46156 /* entry requires update */
46157 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
46158- fscache_stat(&fscache_n_checkaux_update);
46159+ fscache_stat_unchecked(&fscache_n_checkaux_update);
46160 break;
46161
46162 /* entry requires deletion */
46163 case FSCACHE_CHECKAUX_OBSOLETE:
46164- fscache_stat(&fscache_n_checkaux_obsolete);
46165+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
46166 break;
46167
46168 default:
46169diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
46170index 30afdfa..2256596 100644
46171--- a/fs/fscache/operation.c
46172+++ b/fs/fscache/operation.c
46173@@ -17,7 +17,7 @@
46174 #include <linux/slab.h>
46175 #include "internal.h"
46176
46177-atomic_t fscache_op_debug_id;
46178+atomic_unchecked_t fscache_op_debug_id;
46179 EXPORT_SYMBOL(fscache_op_debug_id);
46180
46181 /**
46182@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
46183 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
46184 ASSERTCMP(atomic_read(&op->usage), >, 0);
46185
46186- fscache_stat(&fscache_n_op_enqueue);
46187+ fscache_stat_unchecked(&fscache_n_op_enqueue);
46188 switch (op->flags & FSCACHE_OP_TYPE) {
46189 case FSCACHE_OP_ASYNC:
46190 _debug("queue async");
46191@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
46192 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
46193 if (op->processor)
46194 fscache_enqueue_operation(op);
46195- fscache_stat(&fscache_n_op_run);
46196+ fscache_stat_unchecked(&fscache_n_op_run);
46197 }
46198
46199 /*
46200@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
46201 if (object->n_ops > 1) {
46202 atomic_inc(&op->usage);
46203 list_add_tail(&op->pend_link, &object->pending_ops);
46204- fscache_stat(&fscache_n_op_pend);
46205+ fscache_stat_unchecked(&fscache_n_op_pend);
46206 } else if (!list_empty(&object->pending_ops)) {
46207 atomic_inc(&op->usage);
46208 list_add_tail(&op->pend_link, &object->pending_ops);
46209- fscache_stat(&fscache_n_op_pend);
46210+ fscache_stat_unchecked(&fscache_n_op_pend);
46211 fscache_start_operations(object);
46212 } else {
46213 ASSERTCMP(object->n_in_progress, ==, 0);
46214@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
46215 object->n_exclusive++; /* reads and writes must wait */
46216 atomic_inc(&op->usage);
46217 list_add_tail(&op->pend_link, &object->pending_ops);
46218- fscache_stat(&fscache_n_op_pend);
46219+ fscache_stat_unchecked(&fscache_n_op_pend);
46220 ret = 0;
46221 } else {
46222 /* not allowed to submit ops in any other state */
46223@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
46224 if (object->n_exclusive > 0) {
46225 atomic_inc(&op->usage);
46226 list_add_tail(&op->pend_link, &object->pending_ops);
46227- fscache_stat(&fscache_n_op_pend);
46228+ fscache_stat_unchecked(&fscache_n_op_pend);
46229 } else if (!list_empty(&object->pending_ops)) {
46230 atomic_inc(&op->usage);
46231 list_add_tail(&op->pend_link, &object->pending_ops);
46232- fscache_stat(&fscache_n_op_pend);
46233+ fscache_stat_unchecked(&fscache_n_op_pend);
46234 fscache_start_operations(object);
46235 } else {
46236 ASSERTCMP(object->n_exclusive, ==, 0);
46237@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
46238 object->n_ops++;
46239 atomic_inc(&op->usage);
46240 list_add_tail(&op->pend_link, &object->pending_ops);
46241- fscache_stat(&fscache_n_op_pend);
46242+ fscache_stat_unchecked(&fscache_n_op_pend);
46243 ret = 0;
46244 } else if (object->state == FSCACHE_OBJECT_DYING ||
46245 object->state == FSCACHE_OBJECT_LC_DYING ||
46246 object->state == FSCACHE_OBJECT_WITHDRAWING) {
46247- fscache_stat(&fscache_n_op_rejected);
46248+ fscache_stat_unchecked(&fscache_n_op_rejected);
46249 ret = -ENOBUFS;
46250 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
46251 fscache_report_unexpected_submission(object, op, ostate);
46252@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
46253
46254 ret = -EBUSY;
46255 if (!list_empty(&op->pend_link)) {
46256- fscache_stat(&fscache_n_op_cancelled);
46257+ fscache_stat_unchecked(&fscache_n_op_cancelled);
46258 list_del_init(&op->pend_link);
46259 object->n_ops--;
46260 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
46261@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
46262 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
46263 BUG();
46264
46265- fscache_stat(&fscache_n_op_release);
46266+ fscache_stat_unchecked(&fscache_n_op_release);
46267
46268 if (op->release) {
46269 op->release(op);
46270@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
46271 * lock, and defer it otherwise */
46272 if (!spin_trylock(&object->lock)) {
46273 _debug("defer put");
46274- fscache_stat(&fscache_n_op_deferred_release);
46275+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
46276
46277 cache = object->cache;
46278 spin_lock(&cache->op_gc_list_lock);
46279@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
46280
46281 _debug("GC DEFERRED REL OBJ%x OP%x",
46282 object->debug_id, op->debug_id);
46283- fscache_stat(&fscache_n_op_gc);
46284+ fscache_stat_unchecked(&fscache_n_op_gc);
46285
46286 ASSERTCMP(atomic_read(&op->usage), ==, 0);
46287
46288diff --git a/fs/fscache/page.c b/fs/fscache/page.c
46289index 3f7a59b..cf196cc 100644
46290--- a/fs/fscache/page.c
46291+++ b/fs/fscache/page.c
46292@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
46293 val = radix_tree_lookup(&cookie->stores, page->index);
46294 if (!val) {
46295 rcu_read_unlock();
46296- fscache_stat(&fscache_n_store_vmscan_not_storing);
46297+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
46298 __fscache_uncache_page(cookie, page);
46299 return true;
46300 }
46301@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
46302 spin_unlock(&cookie->stores_lock);
46303
46304 if (xpage) {
46305- fscache_stat(&fscache_n_store_vmscan_cancelled);
46306- fscache_stat(&fscache_n_store_radix_deletes);
46307+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
46308+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
46309 ASSERTCMP(xpage, ==, page);
46310 } else {
46311- fscache_stat(&fscache_n_store_vmscan_gone);
46312+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
46313 }
46314
46315 wake_up_bit(&cookie->flags, 0);
46316@@ -107,7 +107,7 @@ page_busy:
46317 /* we might want to wait here, but that could deadlock the allocator as
46318 * the work threads writing to the cache may all end up sleeping
46319 * on memory allocation */
46320- fscache_stat(&fscache_n_store_vmscan_busy);
46321+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
46322 return false;
46323 }
46324 EXPORT_SYMBOL(__fscache_maybe_release_page);
46325@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
46326 FSCACHE_COOKIE_STORING_TAG);
46327 if (!radix_tree_tag_get(&cookie->stores, page->index,
46328 FSCACHE_COOKIE_PENDING_TAG)) {
46329- fscache_stat(&fscache_n_store_radix_deletes);
46330+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
46331 xpage = radix_tree_delete(&cookie->stores, page->index);
46332 }
46333 spin_unlock(&cookie->stores_lock);
46334@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
46335
46336 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
46337
46338- fscache_stat(&fscache_n_attr_changed_calls);
46339+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
46340
46341 if (fscache_object_is_active(object)) {
46342 fscache_stat(&fscache_n_cop_attr_changed);
46343@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46344
46345 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46346
46347- fscache_stat(&fscache_n_attr_changed);
46348+ fscache_stat_unchecked(&fscache_n_attr_changed);
46349
46350 op = kzalloc(sizeof(*op), GFP_KERNEL);
46351 if (!op) {
46352- fscache_stat(&fscache_n_attr_changed_nomem);
46353+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
46354 _leave(" = -ENOMEM");
46355 return -ENOMEM;
46356 }
46357@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46358 if (fscache_submit_exclusive_op(object, op) < 0)
46359 goto nobufs;
46360 spin_unlock(&cookie->lock);
46361- fscache_stat(&fscache_n_attr_changed_ok);
46362+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
46363 fscache_put_operation(op);
46364 _leave(" = 0");
46365 return 0;
46366@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46367 nobufs:
46368 spin_unlock(&cookie->lock);
46369 kfree(op);
46370- fscache_stat(&fscache_n_attr_changed_nobufs);
46371+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
46372 _leave(" = %d", -ENOBUFS);
46373 return -ENOBUFS;
46374 }
46375@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
46376 /* allocate a retrieval operation and attempt to submit it */
46377 op = kzalloc(sizeof(*op), GFP_NOIO);
46378 if (!op) {
46379- fscache_stat(&fscache_n_retrievals_nomem);
46380+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46381 return NULL;
46382 }
46383
46384@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
46385 return 0;
46386 }
46387
46388- fscache_stat(&fscache_n_retrievals_wait);
46389+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
46390
46391 jif = jiffies;
46392 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
46393 fscache_wait_bit_interruptible,
46394 TASK_INTERRUPTIBLE) != 0) {
46395- fscache_stat(&fscache_n_retrievals_intr);
46396+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
46397 _leave(" = -ERESTARTSYS");
46398 return -ERESTARTSYS;
46399 }
46400@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
46401 */
46402 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46403 struct fscache_retrieval *op,
46404- atomic_t *stat_op_waits,
46405- atomic_t *stat_object_dead)
46406+ atomic_unchecked_t *stat_op_waits,
46407+ atomic_unchecked_t *stat_object_dead)
46408 {
46409 int ret;
46410
46411@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46412 goto check_if_dead;
46413
46414 _debug(">>> WT");
46415- fscache_stat(stat_op_waits);
46416+ fscache_stat_unchecked(stat_op_waits);
46417 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
46418 fscache_wait_bit_interruptible,
46419 TASK_INTERRUPTIBLE) < 0) {
46420@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46421
46422 check_if_dead:
46423 if (unlikely(fscache_object_is_dead(object))) {
46424- fscache_stat(stat_object_dead);
46425+ fscache_stat_unchecked(stat_object_dead);
46426 return -ENOBUFS;
46427 }
46428 return 0;
46429@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46430
46431 _enter("%p,%p,,,", cookie, page);
46432
46433- fscache_stat(&fscache_n_retrievals);
46434+ fscache_stat_unchecked(&fscache_n_retrievals);
46435
46436 if (hlist_empty(&cookie->backing_objects))
46437 goto nobufs;
46438@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46439 goto nobufs_unlock;
46440 spin_unlock(&cookie->lock);
46441
46442- fscache_stat(&fscache_n_retrieval_ops);
46443+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
46444
46445 /* pin the netfs read context in case we need to do the actual netfs
46446 * read because we've encountered a cache read failure */
46447@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46448
46449 error:
46450 if (ret == -ENOMEM)
46451- fscache_stat(&fscache_n_retrievals_nomem);
46452+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46453 else if (ret == -ERESTARTSYS)
46454- fscache_stat(&fscache_n_retrievals_intr);
46455+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
46456 else if (ret == -ENODATA)
46457- fscache_stat(&fscache_n_retrievals_nodata);
46458+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
46459 else if (ret < 0)
46460- fscache_stat(&fscache_n_retrievals_nobufs);
46461+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46462 else
46463- fscache_stat(&fscache_n_retrievals_ok);
46464+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
46465
46466 fscache_put_retrieval(op);
46467 _leave(" = %d", ret);
46468@@ -429,7 +429,7 @@ nobufs_unlock:
46469 spin_unlock(&cookie->lock);
46470 kfree(op);
46471 nobufs:
46472- fscache_stat(&fscache_n_retrievals_nobufs);
46473+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46474 _leave(" = -ENOBUFS");
46475 return -ENOBUFS;
46476 }
46477@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46478
46479 _enter("%p,,%d,,,", cookie, *nr_pages);
46480
46481- fscache_stat(&fscache_n_retrievals);
46482+ fscache_stat_unchecked(&fscache_n_retrievals);
46483
46484 if (hlist_empty(&cookie->backing_objects))
46485 goto nobufs;
46486@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46487 goto nobufs_unlock;
46488 spin_unlock(&cookie->lock);
46489
46490- fscache_stat(&fscache_n_retrieval_ops);
46491+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
46492
46493 /* pin the netfs read context in case we need to do the actual netfs
46494 * read because we've encountered a cache read failure */
46495@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46496
46497 error:
46498 if (ret == -ENOMEM)
46499- fscache_stat(&fscache_n_retrievals_nomem);
46500+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46501 else if (ret == -ERESTARTSYS)
46502- fscache_stat(&fscache_n_retrievals_intr);
46503+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
46504 else if (ret == -ENODATA)
46505- fscache_stat(&fscache_n_retrievals_nodata);
46506+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
46507 else if (ret < 0)
46508- fscache_stat(&fscache_n_retrievals_nobufs);
46509+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46510 else
46511- fscache_stat(&fscache_n_retrievals_ok);
46512+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
46513
46514 fscache_put_retrieval(op);
46515 _leave(" = %d", ret);
46516@@ -545,7 +545,7 @@ nobufs_unlock:
46517 spin_unlock(&cookie->lock);
46518 kfree(op);
46519 nobufs:
46520- fscache_stat(&fscache_n_retrievals_nobufs);
46521+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46522 _leave(" = -ENOBUFS");
46523 return -ENOBUFS;
46524 }
46525@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46526
46527 _enter("%p,%p,,,", cookie, page);
46528
46529- fscache_stat(&fscache_n_allocs);
46530+ fscache_stat_unchecked(&fscache_n_allocs);
46531
46532 if (hlist_empty(&cookie->backing_objects))
46533 goto nobufs;
46534@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46535 goto nobufs_unlock;
46536 spin_unlock(&cookie->lock);
46537
46538- fscache_stat(&fscache_n_alloc_ops);
46539+ fscache_stat_unchecked(&fscache_n_alloc_ops);
46540
46541 ret = fscache_wait_for_retrieval_activation(
46542 object, op,
46543@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46544
46545 error:
46546 if (ret == -ERESTARTSYS)
46547- fscache_stat(&fscache_n_allocs_intr);
46548+ fscache_stat_unchecked(&fscache_n_allocs_intr);
46549 else if (ret < 0)
46550- fscache_stat(&fscache_n_allocs_nobufs);
46551+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
46552 else
46553- fscache_stat(&fscache_n_allocs_ok);
46554+ fscache_stat_unchecked(&fscache_n_allocs_ok);
46555
46556 fscache_put_retrieval(op);
46557 _leave(" = %d", ret);
46558@@ -625,7 +625,7 @@ nobufs_unlock:
46559 spin_unlock(&cookie->lock);
46560 kfree(op);
46561 nobufs:
46562- fscache_stat(&fscache_n_allocs_nobufs);
46563+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
46564 _leave(" = -ENOBUFS");
46565 return -ENOBUFS;
46566 }
46567@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46568
46569 spin_lock(&cookie->stores_lock);
46570
46571- fscache_stat(&fscache_n_store_calls);
46572+ fscache_stat_unchecked(&fscache_n_store_calls);
46573
46574 /* find a page to store */
46575 page = NULL;
46576@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46577 page = results[0];
46578 _debug("gang %d [%lx]", n, page->index);
46579 if (page->index > op->store_limit) {
46580- fscache_stat(&fscache_n_store_pages_over_limit);
46581+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
46582 goto superseded;
46583 }
46584
46585@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46586 spin_unlock(&cookie->stores_lock);
46587 spin_unlock(&object->lock);
46588
46589- fscache_stat(&fscache_n_store_pages);
46590+ fscache_stat_unchecked(&fscache_n_store_pages);
46591 fscache_stat(&fscache_n_cop_write_page);
46592 ret = object->cache->ops->write_page(op, page);
46593 fscache_stat_d(&fscache_n_cop_write_page);
46594@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46595 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46596 ASSERT(PageFsCache(page));
46597
46598- fscache_stat(&fscache_n_stores);
46599+ fscache_stat_unchecked(&fscache_n_stores);
46600
46601 op = kzalloc(sizeof(*op), GFP_NOIO);
46602 if (!op)
46603@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46604 spin_unlock(&cookie->stores_lock);
46605 spin_unlock(&object->lock);
46606
46607- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
46608+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
46609 op->store_limit = object->store_limit;
46610
46611 if (fscache_submit_op(object, &op->op) < 0)
46612@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46613
46614 spin_unlock(&cookie->lock);
46615 radix_tree_preload_end();
46616- fscache_stat(&fscache_n_store_ops);
46617- fscache_stat(&fscache_n_stores_ok);
46618+ fscache_stat_unchecked(&fscache_n_store_ops);
46619+ fscache_stat_unchecked(&fscache_n_stores_ok);
46620
46621 /* the work queue now carries its own ref on the object */
46622 fscache_put_operation(&op->op);
46623@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46624 return 0;
46625
46626 already_queued:
46627- fscache_stat(&fscache_n_stores_again);
46628+ fscache_stat_unchecked(&fscache_n_stores_again);
46629 already_pending:
46630 spin_unlock(&cookie->stores_lock);
46631 spin_unlock(&object->lock);
46632 spin_unlock(&cookie->lock);
46633 radix_tree_preload_end();
46634 kfree(op);
46635- fscache_stat(&fscache_n_stores_ok);
46636+ fscache_stat_unchecked(&fscache_n_stores_ok);
46637 _leave(" = 0");
46638 return 0;
46639
46640@@ -851,14 +851,14 @@ nobufs:
46641 spin_unlock(&cookie->lock);
46642 radix_tree_preload_end();
46643 kfree(op);
46644- fscache_stat(&fscache_n_stores_nobufs);
46645+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
46646 _leave(" = -ENOBUFS");
46647 return -ENOBUFS;
46648
46649 nomem_free:
46650 kfree(op);
46651 nomem:
46652- fscache_stat(&fscache_n_stores_oom);
46653+ fscache_stat_unchecked(&fscache_n_stores_oom);
46654 _leave(" = -ENOMEM");
46655 return -ENOMEM;
46656 }
46657@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
46658 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46659 ASSERTCMP(page, !=, NULL);
46660
46661- fscache_stat(&fscache_n_uncaches);
46662+ fscache_stat_unchecked(&fscache_n_uncaches);
46663
46664 /* cache withdrawal may beat us to it */
46665 if (!PageFsCache(page))
46666@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
46667 unsigned long loop;
46668
46669 #ifdef CONFIG_FSCACHE_STATS
46670- atomic_add(pagevec->nr, &fscache_n_marks);
46671+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
46672 #endif
46673
46674 for (loop = 0; loop < pagevec->nr; loop++) {
46675diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
46676index 4765190..2a067f2 100644
46677--- a/fs/fscache/stats.c
46678+++ b/fs/fscache/stats.c
46679@@ -18,95 +18,95 @@
46680 /*
46681 * operation counters
46682 */
46683-atomic_t fscache_n_op_pend;
46684-atomic_t fscache_n_op_run;
46685-atomic_t fscache_n_op_enqueue;
46686-atomic_t fscache_n_op_requeue;
46687-atomic_t fscache_n_op_deferred_release;
46688-atomic_t fscache_n_op_release;
46689-atomic_t fscache_n_op_gc;
46690-atomic_t fscache_n_op_cancelled;
46691-atomic_t fscache_n_op_rejected;
46692+atomic_unchecked_t fscache_n_op_pend;
46693+atomic_unchecked_t fscache_n_op_run;
46694+atomic_unchecked_t fscache_n_op_enqueue;
46695+atomic_unchecked_t fscache_n_op_requeue;
46696+atomic_unchecked_t fscache_n_op_deferred_release;
46697+atomic_unchecked_t fscache_n_op_release;
46698+atomic_unchecked_t fscache_n_op_gc;
46699+atomic_unchecked_t fscache_n_op_cancelled;
46700+atomic_unchecked_t fscache_n_op_rejected;
46701
46702-atomic_t fscache_n_attr_changed;
46703-atomic_t fscache_n_attr_changed_ok;
46704-atomic_t fscache_n_attr_changed_nobufs;
46705-atomic_t fscache_n_attr_changed_nomem;
46706-atomic_t fscache_n_attr_changed_calls;
46707+atomic_unchecked_t fscache_n_attr_changed;
46708+atomic_unchecked_t fscache_n_attr_changed_ok;
46709+atomic_unchecked_t fscache_n_attr_changed_nobufs;
46710+atomic_unchecked_t fscache_n_attr_changed_nomem;
46711+atomic_unchecked_t fscache_n_attr_changed_calls;
46712
46713-atomic_t fscache_n_allocs;
46714-atomic_t fscache_n_allocs_ok;
46715-atomic_t fscache_n_allocs_wait;
46716-atomic_t fscache_n_allocs_nobufs;
46717-atomic_t fscache_n_allocs_intr;
46718-atomic_t fscache_n_allocs_object_dead;
46719-atomic_t fscache_n_alloc_ops;
46720-atomic_t fscache_n_alloc_op_waits;
46721+atomic_unchecked_t fscache_n_allocs;
46722+atomic_unchecked_t fscache_n_allocs_ok;
46723+atomic_unchecked_t fscache_n_allocs_wait;
46724+atomic_unchecked_t fscache_n_allocs_nobufs;
46725+atomic_unchecked_t fscache_n_allocs_intr;
46726+atomic_unchecked_t fscache_n_allocs_object_dead;
46727+atomic_unchecked_t fscache_n_alloc_ops;
46728+atomic_unchecked_t fscache_n_alloc_op_waits;
46729
46730-atomic_t fscache_n_retrievals;
46731-atomic_t fscache_n_retrievals_ok;
46732-atomic_t fscache_n_retrievals_wait;
46733-atomic_t fscache_n_retrievals_nodata;
46734-atomic_t fscache_n_retrievals_nobufs;
46735-atomic_t fscache_n_retrievals_intr;
46736-atomic_t fscache_n_retrievals_nomem;
46737-atomic_t fscache_n_retrievals_object_dead;
46738-atomic_t fscache_n_retrieval_ops;
46739-atomic_t fscache_n_retrieval_op_waits;
46740+atomic_unchecked_t fscache_n_retrievals;
46741+atomic_unchecked_t fscache_n_retrievals_ok;
46742+atomic_unchecked_t fscache_n_retrievals_wait;
46743+atomic_unchecked_t fscache_n_retrievals_nodata;
46744+atomic_unchecked_t fscache_n_retrievals_nobufs;
46745+atomic_unchecked_t fscache_n_retrievals_intr;
46746+atomic_unchecked_t fscache_n_retrievals_nomem;
46747+atomic_unchecked_t fscache_n_retrievals_object_dead;
46748+atomic_unchecked_t fscache_n_retrieval_ops;
46749+atomic_unchecked_t fscache_n_retrieval_op_waits;
46750
46751-atomic_t fscache_n_stores;
46752-atomic_t fscache_n_stores_ok;
46753-atomic_t fscache_n_stores_again;
46754-atomic_t fscache_n_stores_nobufs;
46755-atomic_t fscache_n_stores_oom;
46756-atomic_t fscache_n_store_ops;
46757-atomic_t fscache_n_store_calls;
46758-atomic_t fscache_n_store_pages;
46759-atomic_t fscache_n_store_radix_deletes;
46760-atomic_t fscache_n_store_pages_over_limit;
46761+atomic_unchecked_t fscache_n_stores;
46762+atomic_unchecked_t fscache_n_stores_ok;
46763+atomic_unchecked_t fscache_n_stores_again;
46764+atomic_unchecked_t fscache_n_stores_nobufs;
46765+atomic_unchecked_t fscache_n_stores_oom;
46766+atomic_unchecked_t fscache_n_store_ops;
46767+atomic_unchecked_t fscache_n_store_calls;
46768+atomic_unchecked_t fscache_n_store_pages;
46769+atomic_unchecked_t fscache_n_store_radix_deletes;
46770+atomic_unchecked_t fscache_n_store_pages_over_limit;
46771
46772-atomic_t fscache_n_store_vmscan_not_storing;
46773-atomic_t fscache_n_store_vmscan_gone;
46774-atomic_t fscache_n_store_vmscan_busy;
46775-atomic_t fscache_n_store_vmscan_cancelled;
46776+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46777+atomic_unchecked_t fscache_n_store_vmscan_gone;
46778+atomic_unchecked_t fscache_n_store_vmscan_busy;
46779+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46780
46781-atomic_t fscache_n_marks;
46782-atomic_t fscache_n_uncaches;
46783+atomic_unchecked_t fscache_n_marks;
46784+atomic_unchecked_t fscache_n_uncaches;
46785
46786-atomic_t fscache_n_acquires;
46787-atomic_t fscache_n_acquires_null;
46788-atomic_t fscache_n_acquires_no_cache;
46789-atomic_t fscache_n_acquires_ok;
46790-atomic_t fscache_n_acquires_nobufs;
46791-atomic_t fscache_n_acquires_oom;
46792+atomic_unchecked_t fscache_n_acquires;
46793+atomic_unchecked_t fscache_n_acquires_null;
46794+atomic_unchecked_t fscache_n_acquires_no_cache;
46795+atomic_unchecked_t fscache_n_acquires_ok;
46796+atomic_unchecked_t fscache_n_acquires_nobufs;
46797+atomic_unchecked_t fscache_n_acquires_oom;
46798
46799-atomic_t fscache_n_updates;
46800-atomic_t fscache_n_updates_null;
46801-atomic_t fscache_n_updates_run;
46802+atomic_unchecked_t fscache_n_updates;
46803+atomic_unchecked_t fscache_n_updates_null;
46804+atomic_unchecked_t fscache_n_updates_run;
46805
46806-atomic_t fscache_n_relinquishes;
46807-atomic_t fscache_n_relinquishes_null;
46808-atomic_t fscache_n_relinquishes_waitcrt;
46809-atomic_t fscache_n_relinquishes_retire;
46810+atomic_unchecked_t fscache_n_relinquishes;
46811+atomic_unchecked_t fscache_n_relinquishes_null;
46812+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46813+atomic_unchecked_t fscache_n_relinquishes_retire;
46814
46815-atomic_t fscache_n_cookie_index;
46816-atomic_t fscache_n_cookie_data;
46817-atomic_t fscache_n_cookie_special;
46818+atomic_unchecked_t fscache_n_cookie_index;
46819+atomic_unchecked_t fscache_n_cookie_data;
46820+atomic_unchecked_t fscache_n_cookie_special;
46821
46822-atomic_t fscache_n_object_alloc;
46823-atomic_t fscache_n_object_no_alloc;
46824-atomic_t fscache_n_object_lookups;
46825-atomic_t fscache_n_object_lookups_negative;
46826-atomic_t fscache_n_object_lookups_positive;
46827-atomic_t fscache_n_object_lookups_timed_out;
46828-atomic_t fscache_n_object_created;
46829-atomic_t fscache_n_object_avail;
46830-atomic_t fscache_n_object_dead;
46831+atomic_unchecked_t fscache_n_object_alloc;
46832+atomic_unchecked_t fscache_n_object_no_alloc;
46833+atomic_unchecked_t fscache_n_object_lookups;
46834+atomic_unchecked_t fscache_n_object_lookups_negative;
46835+atomic_unchecked_t fscache_n_object_lookups_positive;
46836+atomic_unchecked_t fscache_n_object_lookups_timed_out;
46837+atomic_unchecked_t fscache_n_object_created;
46838+atomic_unchecked_t fscache_n_object_avail;
46839+atomic_unchecked_t fscache_n_object_dead;
46840
46841-atomic_t fscache_n_checkaux_none;
46842-atomic_t fscache_n_checkaux_okay;
46843-atomic_t fscache_n_checkaux_update;
46844-atomic_t fscache_n_checkaux_obsolete;
46845+atomic_unchecked_t fscache_n_checkaux_none;
46846+atomic_unchecked_t fscache_n_checkaux_okay;
46847+atomic_unchecked_t fscache_n_checkaux_update;
46848+atomic_unchecked_t fscache_n_checkaux_obsolete;
46849
46850 atomic_t fscache_n_cop_alloc_object;
46851 atomic_t fscache_n_cop_lookup_object;
46852@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
46853 seq_puts(m, "FS-Cache statistics\n");
46854
46855 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
46856- atomic_read(&fscache_n_cookie_index),
46857- atomic_read(&fscache_n_cookie_data),
46858- atomic_read(&fscache_n_cookie_special));
46859+ atomic_read_unchecked(&fscache_n_cookie_index),
46860+ atomic_read_unchecked(&fscache_n_cookie_data),
46861+ atomic_read_unchecked(&fscache_n_cookie_special));
46862
46863 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
46864- atomic_read(&fscache_n_object_alloc),
46865- atomic_read(&fscache_n_object_no_alloc),
46866- atomic_read(&fscache_n_object_avail),
46867- atomic_read(&fscache_n_object_dead));
46868+ atomic_read_unchecked(&fscache_n_object_alloc),
46869+ atomic_read_unchecked(&fscache_n_object_no_alloc),
46870+ atomic_read_unchecked(&fscache_n_object_avail),
46871+ atomic_read_unchecked(&fscache_n_object_dead));
46872 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
46873- atomic_read(&fscache_n_checkaux_none),
46874- atomic_read(&fscache_n_checkaux_okay),
46875- atomic_read(&fscache_n_checkaux_update),
46876- atomic_read(&fscache_n_checkaux_obsolete));
46877+ atomic_read_unchecked(&fscache_n_checkaux_none),
46878+ atomic_read_unchecked(&fscache_n_checkaux_okay),
46879+ atomic_read_unchecked(&fscache_n_checkaux_update),
46880+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
46881
46882 seq_printf(m, "Pages : mrk=%u unc=%u\n",
46883- atomic_read(&fscache_n_marks),
46884- atomic_read(&fscache_n_uncaches));
46885+ atomic_read_unchecked(&fscache_n_marks),
46886+ atomic_read_unchecked(&fscache_n_uncaches));
46887
46888 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
46889 " oom=%u\n",
46890- atomic_read(&fscache_n_acquires),
46891- atomic_read(&fscache_n_acquires_null),
46892- atomic_read(&fscache_n_acquires_no_cache),
46893- atomic_read(&fscache_n_acquires_ok),
46894- atomic_read(&fscache_n_acquires_nobufs),
46895- atomic_read(&fscache_n_acquires_oom));
46896+ atomic_read_unchecked(&fscache_n_acquires),
46897+ atomic_read_unchecked(&fscache_n_acquires_null),
46898+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
46899+ atomic_read_unchecked(&fscache_n_acquires_ok),
46900+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
46901+ atomic_read_unchecked(&fscache_n_acquires_oom));
46902
46903 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
46904- atomic_read(&fscache_n_object_lookups),
46905- atomic_read(&fscache_n_object_lookups_negative),
46906- atomic_read(&fscache_n_object_lookups_positive),
46907- atomic_read(&fscache_n_object_created),
46908- atomic_read(&fscache_n_object_lookups_timed_out));
46909+ atomic_read_unchecked(&fscache_n_object_lookups),
46910+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
46911+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
46912+ atomic_read_unchecked(&fscache_n_object_created),
46913+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
46914
46915 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
46916- atomic_read(&fscache_n_updates),
46917- atomic_read(&fscache_n_updates_null),
46918- atomic_read(&fscache_n_updates_run));
46919+ atomic_read_unchecked(&fscache_n_updates),
46920+ atomic_read_unchecked(&fscache_n_updates_null),
46921+ atomic_read_unchecked(&fscache_n_updates_run));
46922
46923 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
46924- atomic_read(&fscache_n_relinquishes),
46925- atomic_read(&fscache_n_relinquishes_null),
46926- atomic_read(&fscache_n_relinquishes_waitcrt),
46927- atomic_read(&fscache_n_relinquishes_retire));
46928+ atomic_read_unchecked(&fscache_n_relinquishes),
46929+ atomic_read_unchecked(&fscache_n_relinquishes_null),
46930+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
46931+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
46932
46933 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
46934- atomic_read(&fscache_n_attr_changed),
46935- atomic_read(&fscache_n_attr_changed_ok),
46936- atomic_read(&fscache_n_attr_changed_nobufs),
46937- atomic_read(&fscache_n_attr_changed_nomem),
46938- atomic_read(&fscache_n_attr_changed_calls));
46939+ atomic_read_unchecked(&fscache_n_attr_changed),
46940+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
46941+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
46942+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
46943+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
46944
46945 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
46946- atomic_read(&fscache_n_allocs),
46947- atomic_read(&fscache_n_allocs_ok),
46948- atomic_read(&fscache_n_allocs_wait),
46949- atomic_read(&fscache_n_allocs_nobufs),
46950- atomic_read(&fscache_n_allocs_intr));
46951+ atomic_read_unchecked(&fscache_n_allocs),
46952+ atomic_read_unchecked(&fscache_n_allocs_ok),
46953+ atomic_read_unchecked(&fscache_n_allocs_wait),
46954+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
46955+ atomic_read_unchecked(&fscache_n_allocs_intr));
46956 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
46957- atomic_read(&fscache_n_alloc_ops),
46958- atomic_read(&fscache_n_alloc_op_waits),
46959- atomic_read(&fscache_n_allocs_object_dead));
46960+ atomic_read_unchecked(&fscache_n_alloc_ops),
46961+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
46962+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
46963
46964 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
46965 " int=%u oom=%u\n",
46966- atomic_read(&fscache_n_retrievals),
46967- atomic_read(&fscache_n_retrievals_ok),
46968- atomic_read(&fscache_n_retrievals_wait),
46969- atomic_read(&fscache_n_retrievals_nodata),
46970- atomic_read(&fscache_n_retrievals_nobufs),
46971- atomic_read(&fscache_n_retrievals_intr),
46972- atomic_read(&fscache_n_retrievals_nomem));
46973+ atomic_read_unchecked(&fscache_n_retrievals),
46974+ atomic_read_unchecked(&fscache_n_retrievals_ok),
46975+ atomic_read_unchecked(&fscache_n_retrievals_wait),
46976+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
46977+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
46978+ atomic_read_unchecked(&fscache_n_retrievals_intr),
46979+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
46980 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
46981- atomic_read(&fscache_n_retrieval_ops),
46982- atomic_read(&fscache_n_retrieval_op_waits),
46983- atomic_read(&fscache_n_retrievals_object_dead));
46984+ atomic_read_unchecked(&fscache_n_retrieval_ops),
46985+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
46986+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
46987
46988 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
46989- atomic_read(&fscache_n_stores),
46990- atomic_read(&fscache_n_stores_ok),
46991- atomic_read(&fscache_n_stores_again),
46992- atomic_read(&fscache_n_stores_nobufs),
46993- atomic_read(&fscache_n_stores_oom));
46994+ atomic_read_unchecked(&fscache_n_stores),
46995+ atomic_read_unchecked(&fscache_n_stores_ok),
46996+ atomic_read_unchecked(&fscache_n_stores_again),
46997+ atomic_read_unchecked(&fscache_n_stores_nobufs),
46998+ atomic_read_unchecked(&fscache_n_stores_oom));
46999 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
47000- atomic_read(&fscache_n_store_ops),
47001- atomic_read(&fscache_n_store_calls),
47002- atomic_read(&fscache_n_store_pages),
47003- atomic_read(&fscache_n_store_radix_deletes),
47004- atomic_read(&fscache_n_store_pages_over_limit));
47005+ atomic_read_unchecked(&fscache_n_store_ops),
47006+ atomic_read_unchecked(&fscache_n_store_calls),
47007+ atomic_read_unchecked(&fscache_n_store_pages),
47008+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
47009+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
47010
47011 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
47012- atomic_read(&fscache_n_store_vmscan_not_storing),
47013- atomic_read(&fscache_n_store_vmscan_gone),
47014- atomic_read(&fscache_n_store_vmscan_busy),
47015- atomic_read(&fscache_n_store_vmscan_cancelled));
47016+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
47017+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
47018+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
47019+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
47020
47021 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
47022- atomic_read(&fscache_n_op_pend),
47023- atomic_read(&fscache_n_op_run),
47024- atomic_read(&fscache_n_op_enqueue),
47025- atomic_read(&fscache_n_op_cancelled),
47026- atomic_read(&fscache_n_op_rejected));
47027+ atomic_read_unchecked(&fscache_n_op_pend),
47028+ atomic_read_unchecked(&fscache_n_op_run),
47029+ atomic_read_unchecked(&fscache_n_op_enqueue),
47030+ atomic_read_unchecked(&fscache_n_op_cancelled),
47031+ atomic_read_unchecked(&fscache_n_op_rejected));
47032 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
47033- atomic_read(&fscache_n_op_deferred_release),
47034- atomic_read(&fscache_n_op_release),
47035- atomic_read(&fscache_n_op_gc));
47036+ atomic_read_unchecked(&fscache_n_op_deferred_release),
47037+ atomic_read_unchecked(&fscache_n_op_release),
47038+ atomic_read_unchecked(&fscache_n_op_gc));
47039
47040 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
47041 atomic_read(&fscache_n_cop_alloc_object),
47042diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
47043index 3426521..3b75162 100644
47044--- a/fs/fuse/cuse.c
47045+++ b/fs/fuse/cuse.c
47046@@ -587,10 +587,12 @@ static int __init cuse_init(void)
47047 INIT_LIST_HEAD(&cuse_conntbl[i]);
47048
47049 /* inherit and extend fuse_dev_operations */
47050- cuse_channel_fops = fuse_dev_operations;
47051- cuse_channel_fops.owner = THIS_MODULE;
47052- cuse_channel_fops.open = cuse_channel_open;
47053- cuse_channel_fops.release = cuse_channel_release;
47054+ pax_open_kernel();
47055+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
47056+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
47057+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
47058+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
47059+ pax_close_kernel();
47060
47061 cuse_class = class_create(THIS_MODULE, "cuse");
47062 if (IS_ERR(cuse_class))
47063diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
47064index f4246cf..b4aed1d 100644
47065--- a/fs/fuse/dev.c
47066+++ b/fs/fuse/dev.c
47067@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
47068 ret = 0;
47069 pipe_lock(pipe);
47070
47071- if (!pipe->readers) {
47072+ if (!atomic_read(&pipe->readers)) {
47073 send_sig(SIGPIPE, current, 0);
47074 if (!ret)
47075 ret = -EPIPE;
47076diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
47077index 334e0b1..fc571e8 100644
47078--- a/fs/fuse/dir.c
47079+++ b/fs/fuse/dir.c
47080@@ -1189,7 +1189,7 @@ static char *read_link(struct dentry *dentry)
47081 return link;
47082 }
47083
47084-static void free_link(char *link)
47085+static void free_link(const char *link)
47086 {
47087 if (!IS_ERR(link))
47088 free_page((unsigned long) link);
47089diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
47090index a9ba244..d9df391 100644
47091--- a/fs/gfs2/inode.c
47092+++ b/fs/gfs2/inode.c
47093@@ -1496,7 +1496,7 @@ out:
47094
47095 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
47096 {
47097- char *s = nd_get_link(nd);
47098+ const char *s = nd_get_link(nd);
47099 if (!IS_ERR(s))
47100 kfree(s);
47101 }
47102diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
47103index cc9281b..58996fb 100644
47104--- a/fs/hugetlbfs/inode.c
47105+++ b/fs/hugetlbfs/inode.c
47106@@ -920,7 +920,7 @@ static struct file_system_type hugetlbfs_fs_type = {
47107 .kill_sb = kill_litter_super,
47108 };
47109
47110-static struct vfsmount *hugetlbfs_vfsmount;
47111+struct vfsmount *hugetlbfs_vfsmount;
47112
47113 static int can_do_hugetlb_shm(void)
47114 {
47115diff --git a/fs/inode.c b/fs/inode.c
47116index c99163b..a11ad40 100644
47117--- a/fs/inode.c
47118+++ b/fs/inode.c
47119@@ -867,8 +867,8 @@ unsigned int get_next_ino(void)
47120
47121 #ifdef CONFIG_SMP
47122 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
47123- static atomic_t shared_last_ino;
47124- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
47125+ static atomic_unchecked_t shared_last_ino;
47126+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
47127
47128 res = next - LAST_INO_BATCH;
47129 }
47130diff --git a/fs/isofs/export.c b/fs/isofs/export.c
47131index aa4356d..1d38044 100644
47132--- a/fs/isofs/export.c
47133+++ b/fs/isofs/export.c
47134@@ -134,6 +134,7 @@ isofs_export_encode_fh(struct inode *inode,
47135 len = 3;
47136 fh32[0] = ei->i_iget5_block;
47137 fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */
47138+ fh16[3] = 0; /* avoid leaking uninitialized data */
47139 fh32[2] = inode->i_generation;
47140 if (parent) {
47141 struct iso_inode_info *eparent;
47142diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
47143index 4a6cf28..d3a29d3 100644
47144--- a/fs/jffs2/erase.c
47145+++ b/fs/jffs2/erase.c
47146@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
47147 struct jffs2_unknown_node marker = {
47148 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
47149 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
47150- .totlen = cpu_to_je32(c->cleanmarker_size)
47151+ .totlen = cpu_to_je32(c->cleanmarker_size),
47152+ .hdr_crc = cpu_to_je32(0)
47153 };
47154
47155 jffs2_prealloc_raw_node_refs(c, jeb, 1);
47156diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
47157index 6f4529d..bf12806 100644
47158--- a/fs/jffs2/wbuf.c
47159+++ b/fs/jffs2/wbuf.c
47160@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
47161 {
47162 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
47163 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
47164- .totlen = constant_cpu_to_je32(8)
47165+ .totlen = constant_cpu_to_je32(8),
47166+ .hdr_crc = constant_cpu_to_je32(0)
47167 };
47168
47169 /*
47170diff --git a/fs/jfs/super.c b/fs/jfs/super.c
47171index 4a82950..bcaa0cb 100644
47172--- a/fs/jfs/super.c
47173+++ b/fs/jfs/super.c
47174@@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
47175
47176 jfs_inode_cachep =
47177 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
47178- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
47179+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
47180 init_once);
47181 if (jfs_inode_cachep == NULL)
47182 return -ENOMEM;
47183diff --git a/fs/libfs.c b/fs/libfs.c
47184index f86ec27..4734776 100644
47185--- a/fs/libfs.c
47186+++ b/fs/libfs.c
47187@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
47188
47189 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
47190 struct dentry *next;
47191+ char d_name[sizeof(next->d_iname)];
47192+ const unsigned char *name;
47193+
47194 next = list_entry(p, struct dentry, d_u.d_child);
47195 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
47196 if (!simple_positive(next)) {
47197@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
47198
47199 spin_unlock(&next->d_lock);
47200 spin_unlock(&dentry->d_lock);
47201- if (filldir(dirent, next->d_name.name,
47202+ name = next->d_name.name;
47203+ if (name == next->d_iname) {
47204+ memcpy(d_name, name, next->d_name.len);
47205+ name = d_name;
47206+ }
47207+ if (filldir(dirent, name,
47208 next->d_name.len, filp->f_pos,
47209 next->d_inode->i_ino,
47210 dt_type(next->d_inode)) < 0)
47211diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
47212index 8392cb8..80d6193 100644
47213--- a/fs/lockd/clntproc.c
47214+++ b/fs/lockd/clntproc.c
47215@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
47216 /*
47217 * Cookie counter for NLM requests
47218 */
47219-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
47220+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
47221
47222 void nlmclnt_next_cookie(struct nlm_cookie *c)
47223 {
47224- u32 cookie = atomic_inc_return(&nlm_cookie);
47225+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
47226
47227 memcpy(c->data, &cookie, 4);
47228 c->len=4;
47229diff --git a/fs/locks.c b/fs/locks.c
47230index 82c3533..34e929c 100644
47231--- a/fs/locks.c
47232+++ b/fs/locks.c
47233@@ -2076,16 +2076,16 @@ void locks_remove_flock(struct file *filp)
47234 return;
47235
47236 if (filp->f_op && filp->f_op->flock) {
47237- struct file_lock fl = {
47238+ struct file_lock flock = {
47239 .fl_pid = current->tgid,
47240 .fl_file = filp,
47241 .fl_flags = FL_FLOCK,
47242 .fl_type = F_UNLCK,
47243 .fl_end = OFFSET_MAX,
47244 };
47245- filp->f_op->flock(filp, F_SETLKW, &fl);
47246- if (fl.fl_ops && fl.fl_ops->fl_release_private)
47247- fl.fl_ops->fl_release_private(&fl);
47248+ filp->f_op->flock(filp, F_SETLKW, &flock);
47249+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
47250+ flock.fl_ops->fl_release_private(&flock);
47251 }
47252
47253 lock_flocks();
47254diff --git a/fs/namei.c b/fs/namei.c
47255index 7d69419..c7a09f0 100644
47256--- a/fs/namei.c
47257+++ b/fs/namei.c
47258@@ -265,16 +265,32 @@ int generic_permission(struct inode *inode, int mask)
47259 if (ret != -EACCES)
47260 return ret;
47261
47262+#ifdef CONFIG_GRKERNSEC
47263+ /* we'll block if we have to log due to a denied capability use */
47264+ if (mask & MAY_NOT_BLOCK)
47265+ return -ECHILD;
47266+#endif
47267+
47268 if (S_ISDIR(inode->i_mode)) {
47269 /* DACs are overridable for directories */
47270- if (inode_capable(inode, CAP_DAC_OVERRIDE))
47271- return 0;
47272 if (!(mask & MAY_WRITE))
47273- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
47274+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
47275+ inode_capable(inode, CAP_DAC_READ_SEARCH))
47276 return 0;
47277+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
47278+ return 0;
47279 return -EACCES;
47280 }
47281 /*
47282+ * Searching includes executable on directories, else just read.
47283+ */
47284+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
47285+ if (mask == MAY_READ)
47286+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
47287+ inode_capable(inode, CAP_DAC_READ_SEARCH))
47288+ return 0;
47289+
47290+ /*
47291 * Read/write DACs are always overridable.
47292 * Executable DACs are overridable when there is
47293 * at least one exec bit set.
47294@@ -283,14 +299,6 @@ int generic_permission(struct inode *inode, int mask)
47295 if (inode_capable(inode, CAP_DAC_OVERRIDE))
47296 return 0;
47297
47298- /*
47299- * Searching includes executable on directories, else just read.
47300- */
47301- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
47302- if (mask == MAY_READ)
47303- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
47304- return 0;
47305-
47306 return -EACCES;
47307 }
47308
47309@@ -639,11 +647,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
47310 return error;
47311 }
47312
47313+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
47314+ dentry->d_inode, dentry, nd->path.mnt)) {
47315+ error = -EACCES;
47316+ *p = ERR_PTR(error); /* no ->put_link(), please */
47317+ path_put(&nd->path);
47318+ return error;
47319+ }
47320+
47321 nd->last_type = LAST_BIND;
47322 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
47323 error = PTR_ERR(*p);
47324 if (!IS_ERR(*p)) {
47325- char *s = nd_get_link(nd);
47326+ const char *s = nd_get_link(nd);
47327 error = 0;
47328 if (s)
47329 error = __vfs_follow_link(nd, s);
47330@@ -1386,6 +1402,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
47331 if (!res)
47332 res = walk_component(nd, path, &nd->last,
47333 nd->last_type, LOOKUP_FOLLOW);
47334+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
47335+ res = -EACCES;
47336 put_link(nd, &link, cookie);
47337 } while (res > 0);
47338
47339@@ -1779,6 +1797,8 @@ static int path_lookupat(int dfd, const char *name,
47340 err = follow_link(&link, nd, &cookie);
47341 if (!err)
47342 err = lookup_last(nd, &path);
47343+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
47344+ err = -EACCES;
47345 put_link(nd, &link, cookie);
47346 }
47347 }
47348@@ -1786,6 +1806,21 @@ static int path_lookupat(int dfd, const char *name,
47349 if (!err)
47350 err = complete_walk(nd);
47351
47352+ if (!(nd->flags & LOOKUP_PARENT)) {
47353+#ifdef CONFIG_GRKERNSEC
47354+ if (flags & LOOKUP_RCU) {
47355+ if (!err)
47356+ path_put(&nd->path);
47357+ err = -ECHILD;
47358+ } else
47359+#endif
47360+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47361+ if (!err)
47362+ path_put(&nd->path);
47363+ err = -ENOENT;
47364+ }
47365+ }
47366+
47367 if (!err && nd->flags & LOOKUP_DIRECTORY) {
47368 if (!nd->inode->i_op->lookup) {
47369 path_put(&nd->path);
47370@@ -1813,6 +1848,15 @@ static int do_path_lookup(int dfd, const char *name,
47371 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
47372
47373 if (likely(!retval)) {
47374+ if (*name != '/' && nd->path.dentry && nd->inode) {
47375+#ifdef CONFIG_GRKERNSEC
47376+ if (flags & LOOKUP_RCU)
47377+ return -ECHILD;
47378+#endif
47379+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
47380+ return -ENOENT;
47381+ }
47382+
47383 if (unlikely(!audit_dummy_context())) {
47384 if (nd->path.dentry && nd->inode)
47385 audit_inode(name, nd->path.dentry);
47386@@ -2155,6 +2199,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
47387 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
47388 return -EPERM;
47389
47390+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
47391+ return -EPERM;
47392+ if (gr_handle_rawio(inode))
47393+ return -EPERM;
47394+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
47395+ return -EACCES;
47396+
47397 return 0;
47398 }
47399
47400@@ -2190,7 +2241,7 @@ static inline int open_to_namei_flags(int flag)
47401 /*
47402 * Handle the last step of open()
47403 */
47404-static struct file *do_last(struct nameidata *nd, struct path *path,
47405+static struct file *do_last(struct nameidata *nd, struct path *path, struct path *link,
47406 const struct open_flags *op, const char *pathname)
47407 {
47408 struct dentry *dir = nd->path.dentry;
47409@@ -2220,16 +2271,44 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47410 error = complete_walk(nd);
47411 if (error)
47412 return ERR_PTR(error);
47413+#ifdef CONFIG_GRKERNSEC
47414+ if (nd->flags & LOOKUP_RCU) {
47415+ error = -ECHILD;
47416+ goto exit;
47417+ }
47418+#endif
47419+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47420+ error = -ENOENT;
47421+ goto exit;
47422+ }
47423 audit_inode(pathname, nd->path.dentry);
47424 if (open_flag & O_CREAT) {
47425 error = -EISDIR;
47426 goto exit;
47427 }
47428+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
47429+ error = -EACCES;
47430+ goto exit;
47431+ }
47432 goto ok;
47433 case LAST_BIND:
47434 error = complete_walk(nd);
47435 if (error)
47436 return ERR_PTR(error);
47437+#ifdef CONFIG_GRKERNSEC
47438+ if (nd->flags & LOOKUP_RCU) {
47439+ error = -ECHILD;
47440+ goto exit;
47441+ }
47442+#endif
47443+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
47444+ error = -ENOENT;
47445+ goto exit;
47446+ }
47447+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
47448+ error = -EACCES;
47449+ goto exit;
47450+ }
47451 audit_inode(pathname, dir);
47452 goto ok;
47453 }
47454@@ -2285,6 +2364,17 @@ retry_lookup:
47455 /* Negative dentry, just create the file */
47456 if (!dentry->d_inode) {
47457 umode_t mode = op->mode;
47458+
47459+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
47460+ error = -EACCES;
47461+ goto exit_mutex_unlock;
47462+ }
47463+
47464+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
47465+ error = -EACCES;
47466+ goto exit_mutex_unlock;
47467+ }
47468+
47469 if (!IS_POSIXACL(dir->d_inode))
47470 mode &= ~current_umask();
47471 /*
47472@@ -2308,6 +2398,8 @@ retry_lookup:
47473 error = vfs_create(dir->d_inode, dentry, mode, nd);
47474 if (error)
47475 goto exit_mutex_unlock;
47476+ else
47477+ gr_handle_create(path->dentry, path->mnt);
47478 mutex_unlock(&dir->d_inode->i_mutex);
47479 dput(nd->path.dentry);
47480 nd->path.dentry = dentry;
47481@@ -2317,6 +2409,23 @@ retry_lookup:
47482 /*
47483 * It already exists.
47484 */
47485+
47486+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
47487+ error = -ENOENT;
47488+ goto exit_mutex_unlock;
47489+ }
47490+ if (link && gr_handle_symlink_owner(link, dentry->d_inode)) {
47491+ error = -EACCES;
47492+ goto exit_mutex_unlock;
47493+ }
47494+
47495+ /* only check if O_CREAT is specified, all other checks need to go
47496+ into may_open */
47497+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
47498+ error = -EACCES;
47499+ goto exit_mutex_unlock;
47500+ }
47501+
47502 mutex_unlock(&dir->d_inode->i_mutex);
47503 audit_inode(pathname, path->dentry);
47504
47505@@ -2349,6 +2458,11 @@ finish_lookup:
47506 }
47507 }
47508 BUG_ON(inode != path->dentry->d_inode);
47509+ /* if we're resolving a symlink to another symlink */
47510+ if (link && gr_handle_symlink_owner(link, inode)) {
47511+ error = -EACCES;
47512+ goto exit;
47513+ }
47514 return NULL;
47515 }
47516
47517@@ -2358,7 +2472,6 @@ finish_lookup:
47518 save_parent.dentry = nd->path.dentry;
47519 save_parent.mnt = mntget(path->mnt);
47520 nd->path.dentry = path->dentry;
47521-
47522 }
47523 nd->inode = inode;
47524 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
47525@@ -2367,6 +2480,21 @@ finish_lookup:
47526 path_put(&save_parent);
47527 return ERR_PTR(error);
47528 }
47529+#ifdef CONFIG_GRKERNSEC
47530+ if (nd->flags & LOOKUP_RCU) {
47531+ error = -ECHILD;
47532+ goto exit;
47533+ }
47534+#endif
47535+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47536+ error = -ENOENT;
47537+ goto exit;
47538+ }
47539+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
47540+ error = -EACCES;
47541+ goto exit;
47542+ }
47543+
47544 error = -EISDIR;
47545 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
47546 goto exit;
47547@@ -2461,7 +2589,7 @@ static struct file *path_openat(int dfd, const char *pathname,
47548 if (unlikely(error))
47549 goto out_filp;
47550
47551- filp = do_last(nd, &path, op, pathname);
47552+ filp = do_last(nd, &path, NULL, op, pathname);
47553 while (unlikely(!filp)) { /* trailing symlink */
47554 struct path link = path;
47555 void *cookie;
47556@@ -2476,8 +2604,9 @@ static struct file *path_openat(int dfd, const char *pathname,
47557 error = follow_link(&link, nd, &cookie);
47558 if (unlikely(error))
47559 filp = ERR_PTR(error);
47560- else
47561- filp = do_last(nd, &path, op, pathname);
47562+ else {
47563+ filp = do_last(nd, &path, &link, op, pathname);
47564+ }
47565 put_link(nd, &link, cookie);
47566 }
47567 out:
47568@@ -2577,6 +2706,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
47569 *path = nd.path;
47570 return dentry;
47571 eexist:
47572+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
47573+ dput(dentry);
47574+ dentry = ERR_PTR(-ENOENT);
47575+ goto fail;
47576+ }
47577 dput(dentry);
47578 dentry = ERR_PTR(-EEXIST);
47579 fail:
47580@@ -2599,6 +2733,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
47581 }
47582 EXPORT_SYMBOL(user_path_create);
47583
47584+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
47585+{
47586+ char *tmp = getname(pathname);
47587+ struct dentry *res;
47588+ if (IS_ERR(tmp))
47589+ return ERR_CAST(tmp);
47590+ res = kern_path_create(dfd, tmp, path, is_dir);
47591+ if (IS_ERR(res))
47592+ putname(tmp);
47593+ else
47594+ *to = tmp;
47595+ return res;
47596+}
47597+
47598 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
47599 {
47600 int error = may_create(dir, dentry);
47601@@ -2665,6 +2813,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
47602 error = mnt_want_write(path.mnt);
47603 if (error)
47604 goto out_dput;
47605+
47606+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
47607+ error = -EPERM;
47608+ goto out_drop_write;
47609+ }
47610+
47611+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
47612+ error = -EACCES;
47613+ goto out_drop_write;
47614+ }
47615+
47616 error = security_path_mknod(&path, dentry, mode, dev);
47617 if (error)
47618 goto out_drop_write;
47619@@ -2682,6 +2841,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
47620 }
47621 out_drop_write:
47622 mnt_drop_write(path.mnt);
47623+
47624+ if (!error)
47625+ gr_handle_create(dentry, path.mnt);
47626 out_dput:
47627 dput(dentry);
47628 mutex_unlock(&path.dentry->d_inode->i_mutex);
47629@@ -2735,12 +2897,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
47630 error = mnt_want_write(path.mnt);
47631 if (error)
47632 goto out_dput;
47633+
47634+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
47635+ error = -EACCES;
47636+ goto out_drop_write;
47637+ }
47638+
47639 error = security_path_mkdir(&path, dentry, mode);
47640 if (error)
47641 goto out_drop_write;
47642 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
47643 out_drop_write:
47644 mnt_drop_write(path.mnt);
47645+
47646+ if (!error)
47647+ gr_handle_create(dentry, path.mnt);
47648 out_dput:
47649 dput(dentry);
47650 mutex_unlock(&path.dentry->d_inode->i_mutex);
47651@@ -2820,6 +2991,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
47652 char * name;
47653 struct dentry *dentry;
47654 struct nameidata nd;
47655+ ino_t saved_ino = 0;
47656+ dev_t saved_dev = 0;
47657
47658 error = user_path_parent(dfd, pathname, &nd, &name);
47659 if (error)
47660@@ -2848,6 +3021,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
47661 error = -ENOENT;
47662 goto exit3;
47663 }
47664+
47665+ saved_ino = dentry->d_inode->i_ino;
47666+ saved_dev = gr_get_dev_from_dentry(dentry);
47667+
47668+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
47669+ error = -EACCES;
47670+ goto exit3;
47671+ }
47672+
47673 error = mnt_want_write(nd.path.mnt);
47674 if (error)
47675 goto exit3;
47676@@ -2855,6 +3037,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
47677 if (error)
47678 goto exit4;
47679 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
47680+ if (!error && (saved_dev || saved_ino))
47681+ gr_handle_delete(saved_ino, saved_dev);
47682 exit4:
47683 mnt_drop_write(nd.path.mnt);
47684 exit3:
47685@@ -2917,6 +3101,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47686 struct dentry *dentry;
47687 struct nameidata nd;
47688 struct inode *inode = NULL;
47689+ ino_t saved_ino = 0;
47690+ dev_t saved_dev = 0;
47691
47692 error = user_path_parent(dfd, pathname, &nd, &name);
47693 if (error)
47694@@ -2939,6 +3125,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47695 if (!inode)
47696 goto slashes;
47697 ihold(inode);
47698+
47699+ if (inode->i_nlink <= 1) {
47700+ saved_ino = inode->i_ino;
47701+ saved_dev = gr_get_dev_from_dentry(dentry);
47702+ }
47703+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
47704+ error = -EACCES;
47705+ goto exit2;
47706+ }
47707+
47708 error = mnt_want_write(nd.path.mnt);
47709 if (error)
47710 goto exit2;
47711@@ -2946,6 +3142,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47712 if (error)
47713 goto exit3;
47714 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
47715+ if (!error && (saved_ino || saved_dev))
47716+ gr_handle_delete(saved_ino, saved_dev);
47717 exit3:
47718 mnt_drop_write(nd.path.mnt);
47719 exit2:
47720@@ -3021,10 +3219,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
47721 error = mnt_want_write(path.mnt);
47722 if (error)
47723 goto out_dput;
47724+
47725+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
47726+ error = -EACCES;
47727+ goto out_drop_write;
47728+ }
47729+
47730 error = security_path_symlink(&path, dentry, from);
47731 if (error)
47732 goto out_drop_write;
47733 error = vfs_symlink(path.dentry->d_inode, dentry, from);
47734+ if (!error)
47735+ gr_handle_create(dentry, path.mnt);
47736 out_drop_write:
47737 mnt_drop_write(path.mnt);
47738 out_dput:
47739@@ -3099,6 +3305,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47740 {
47741 struct dentry *new_dentry;
47742 struct path old_path, new_path;
47743+ char *to = NULL;
47744 int how = 0;
47745 int error;
47746
47747@@ -3122,7 +3329,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47748 if (error)
47749 return error;
47750
47751- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
47752+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
47753 error = PTR_ERR(new_dentry);
47754 if (IS_ERR(new_dentry))
47755 goto out;
47756@@ -3133,13 +3340,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47757 error = mnt_want_write(new_path.mnt);
47758 if (error)
47759 goto out_dput;
47760+
47761+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
47762+ old_path.dentry->d_inode,
47763+ old_path.dentry->d_inode->i_mode, to)) {
47764+ error = -EACCES;
47765+ goto out_drop_write;
47766+ }
47767+
47768+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
47769+ old_path.dentry, old_path.mnt, to)) {
47770+ error = -EACCES;
47771+ goto out_drop_write;
47772+ }
47773+
47774 error = security_path_link(old_path.dentry, &new_path, new_dentry);
47775 if (error)
47776 goto out_drop_write;
47777 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
47778+ if (!error)
47779+ gr_handle_create(new_dentry, new_path.mnt);
47780 out_drop_write:
47781 mnt_drop_write(new_path.mnt);
47782 out_dput:
47783+ putname(to);
47784 dput(new_dentry);
47785 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
47786 path_put(&new_path);
47787@@ -3373,6 +3597,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47788 if (new_dentry == trap)
47789 goto exit5;
47790
47791+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
47792+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
47793+ to);
47794+ if (error)
47795+ goto exit5;
47796+
47797 error = mnt_want_write(oldnd.path.mnt);
47798 if (error)
47799 goto exit5;
47800@@ -3382,6 +3612,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47801 goto exit6;
47802 error = vfs_rename(old_dir->d_inode, old_dentry,
47803 new_dir->d_inode, new_dentry);
47804+ if (!error)
47805+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
47806+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
47807 exit6:
47808 mnt_drop_write(oldnd.path.mnt);
47809 exit5:
47810@@ -3407,6 +3640,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
47811
47812 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
47813 {
47814+ char tmpbuf[64];
47815+ const char *newlink;
47816 int len;
47817
47818 len = PTR_ERR(link);
47819@@ -3416,7 +3651,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
47820 len = strlen(link);
47821 if (len > (unsigned) buflen)
47822 len = buflen;
47823- if (copy_to_user(buffer, link, len))
47824+
47825+ if (len < sizeof(tmpbuf)) {
47826+ memcpy(tmpbuf, link, len);
47827+ newlink = tmpbuf;
47828+ } else
47829+ newlink = link;
47830+
47831+ if (copy_to_user(buffer, newlink, len))
47832 len = -EFAULT;
47833 out:
47834 return len;
47835diff --git a/fs/namespace.c b/fs/namespace.c
47836index 1e4a5fe..a5ce747 100644
47837--- a/fs/namespace.c
47838+++ b/fs/namespace.c
47839@@ -1157,6 +1157,9 @@ static int do_umount(struct mount *mnt, int flags)
47840 if (!(sb->s_flags & MS_RDONLY))
47841 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47842 up_write(&sb->s_umount);
47843+
47844+ gr_log_remount(mnt->mnt_devname, retval);
47845+
47846 return retval;
47847 }
47848
47849@@ -1176,6 +1179,9 @@ static int do_umount(struct mount *mnt, int flags)
47850 br_write_unlock(&vfsmount_lock);
47851 up_write(&namespace_sem);
47852 release_mounts(&umount_list);
47853+
47854+ gr_log_unmount(mnt->mnt_devname, retval);
47855+
47856 return retval;
47857 }
47858
47859@@ -2177,6 +2183,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47860 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47861 MS_STRICTATIME);
47862
47863+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47864+ retval = -EPERM;
47865+ goto dput_out;
47866+ }
47867+
47868+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47869+ retval = -EPERM;
47870+ goto dput_out;
47871+ }
47872+
47873 if (flags & MS_REMOUNT)
47874 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47875 data_page);
47876@@ -2191,6 +2207,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47877 dev_name, data_page);
47878 dput_out:
47879 path_put(&path);
47880+
47881+ gr_log_mount(dev_name, dir_name, retval);
47882+
47883 return retval;
47884 }
47885
47886@@ -2472,6 +2491,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
47887 if (error)
47888 goto out2;
47889
47890+ if (gr_handle_chroot_pivot()) {
47891+ error = -EPERM;
47892+ goto out2;
47893+ }
47894+
47895 get_fs_root(current->fs, &root);
47896 error = lock_mount(&old);
47897 if (error)
47898diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
47899index f729698..2bac081 100644
47900--- a/fs/nfs/inode.c
47901+++ b/fs/nfs/inode.c
47902@@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
47903 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47904 nfsi->attrtimeo_timestamp = jiffies;
47905
47906- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47907+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47908 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47909 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47910 else
47911@@ -1008,16 +1008,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
47912 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47913 }
47914
47915-static atomic_long_t nfs_attr_generation_counter;
47916+static atomic_long_unchecked_t nfs_attr_generation_counter;
47917
47918 static unsigned long nfs_read_attr_generation_counter(void)
47919 {
47920- return atomic_long_read(&nfs_attr_generation_counter);
47921+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47922 }
47923
47924 unsigned long nfs_inc_attr_generation_counter(void)
47925 {
47926- return atomic_long_inc_return(&nfs_attr_generation_counter);
47927+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47928 }
47929
47930 void nfs_fattr_init(struct nfs_fattr *fattr)
47931diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
47932index c8bd9c3..4f83416 100644
47933--- a/fs/nfsd/vfs.c
47934+++ b/fs/nfsd/vfs.c
47935@@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47936 } else {
47937 oldfs = get_fs();
47938 set_fs(KERNEL_DS);
47939- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
47940+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
47941 set_fs(oldfs);
47942 }
47943
47944@@ -1037,7 +1037,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47945
47946 /* Write the data. */
47947 oldfs = get_fs(); set_fs(KERNEL_DS);
47948- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
47949+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
47950 set_fs(oldfs);
47951 if (host_err < 0)
47952 goto out_nfserr;
47953@@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
47954 */
47955
47956 oldfs = get_fs(); set_fs(KERNEL_DS);
47957- host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
47958+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
47959 set_fs(oldfs);
47960
47961 if (host_err < 0)
47962diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
47963index 3568c8a..e0240d8 100644
47964--- a/fs/notify/fanotify/fanotify_user.c
47965+++ b/fs/notify/fanotify/fanotify_user.c
47966@@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
47967 goto out_close_fd;
47968
47969 ret = -EFAULT;
47970- if (copy_to_user(buf, &fanotify_event_metadata,
47971+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
47972+ copy_to_user(buf, &fanotify_event_metadata,
47973 fanotify_event_metadata.event_len))
47974 goto out_kill_access_response;
47975
47976diff --git a/fs/notify/notification.c b/fs/notify/notification.c
47977index c887b13..0fdf472 100644
47978--- a/fs/notify/notification.c
47979+++ b/fs/notify/notification.c
47980@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
47981 * get set to 0 so it will never get 'freed'
47982 */
47983 static struct fsnotify_event *q_overflow_event;
47984-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47985+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47986
47987 /**
47988 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
47989@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47990 */
47991 u32 fsnotify_get_cookie(void)
47992 {
47993- return atomic_inc_return(&fsnotify_sync_cookie);
47994+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47995 }
47996 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47997
47998diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
47999index 99e3610..02c1068 100644
48000--- a/fs/ntfs/dir.c
48001+++ b/fs/ntfs/dir.c
48002@@ -1329,7 +1329,7 @@ find_next_index_buffer:
48003 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
48004 ~(s64)(ndir->itype.index.block_size - 1)));
48005 /* Bounds checks. */
48006- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
48007+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
48008 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
48009 "inode 0x%lx or driver bug.", vdir->i_ino);
48010 goto err_out;
48011diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
48012index 7389d2d..dfd5dbe 100644
48013--- a/fs/ntfs/file.c
48014+++ b/fs/ntfs/file.c
48015@@ -2231,6 +2231,6 @@ const struct inode_operations ntfs_file_inode_ops = {
48016 #endif /* NTFS_RW */
48017 };
48018
48019-const struct file_operations ntfs_empty_file_ops = {};
48020+const struct file_operations ntfs_empty_file_ops __read_only;
48021
48022-const struct inode_operations ntfs_empty_inode_ops = {};
48023+const struct inode_operations ntfs_empty_inode_ops __read_only;
48024diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
48025index 210c352..a174f83 100644
48026--- a/fs/ocfs2/localalloc.c
48027+++ b/fs/ocfs2/localalloc.c
48028@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
48029 goto bail;
48030 }
48031
48032- atomic_inc(&osb->alloc_stats.moves);
48033+ atomic_inc_unchecked(&osb->alloc_stats.moves);
48034
48035 bail:
48036 if (handle)
48037diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
48038index d355e6e..578d905 100644
48039--- a/fs/ocfs2/ocfs2.h
48040+++ b/fs/ocfs2/ocfs2.h
48041@@ -235,11 +235,11 @@ enum ocfs2_vol_state
48042
48043 struct ocfs2_alloc_stats
48044 {
48045- atomic_t moves;
48046- atomic_t local_data;
48047- atomic_t bitmap_data;
48048- atomic_t bg_allocs;
48049- atomic_t bg_extends;
48050+ atomic_unchecked_t moves;
48051+ atomic_unchecked_t local_data;
48052+ atomic_unchecked_t bitmap_data;
48053+ atomic_unchecked_t bg_allocs;
48054+ atomic_unchecked_t bg_extends;
48055 };
48056
48057 enum ocfs2_local_alloc_state
48058diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
48059index f169da4..9112253 100644
48060--- a/fs/ocfs2/suballoc.c
48061+++ b/fs/ocfs2/suballoc.c
48062@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
48063 mlog_errno(status);
48064 goto bail;
48065 }
48066- atomic_inc(&osb->alloc_stats.bg_extends);
48067+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
48068
48069 /* You should never ask for this much metadata */
48070 BUG_ON(bits_wanted >
48071@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
48072 mlog_errno(status);
48073 goto bail;
48074 }
48075- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48076+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48077
48078 *suballoc_loc = res.sr_bg_blkno;
48079 *suballoc_bit_start = res.sr_bit_offset;
48080@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
48081 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
48082 res->sr_bits);
48083
48084- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48085+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48086
48087 BUG_ON(res->sr_bits != 1);
48088
48089@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
48090 mlog_errno(status);
48091 goto bail;
48092 }
48093- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48094+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
48095
48096 BUG_ON(res.sr_bits != 1);
48097
48098@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
48099 cluster_start,
48100 num_clusters);
48101 if (!status)
48102- atomic_inc(&osb->alloc_stats.local_data);
48103+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
48104 } else {
48105 if (min_clusters > (osb->bitmap_cpg - 1)) {
48106 /* The only paths asking for contiguousness
48107@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
48108 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
48109 res.sr_bg_blkno,
48110 res.sr_bit_offset);
48111- atomic_inc(&osb->alloc_stats.bitmap_data);
48112+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
48113 *num_clusters = res.sr_bits;
48114 }
48115 }
48116diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
48117index 68f4541..89cfe6a 100644
48118--- a/fs/ocfs2/super.c
48119+++ b/fs/ocfs2/super.c
48120@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
48121 "%10s => GlobalAllocs: %d LocalAllocs: %d "
48122 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
48123 "Stats",
48124- atomic_read(&osb->alloc_stats.bitmap_data),
48125- atomic_read(&osb->alloc_stats.local_data),
48126- atomic_read(&osb->alloc_stats.bg_allocs),
48127- atomic_read(&osb->alloc_stats.moves),
48128- atomic_read(&osb->alloc_stats.bg_extends));
48129+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
48130+ atomic_read_unchecked(&osb->alloc_stats.local_data),
48131+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
48132+ atomic_read_unchecked(&osb->alloc_stats.moves),
48133+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
48134
48135 out += snprintf(buf + out, len - out,
48136 "%10s => State: %u Descriptor: %llu Size: %u bits "
48137@@ -2116,11 +2116,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
48138 spin_lock_init(&osb->osb_xattr_lock);
48139 ocfs2_init_steal_slots(osb);
48140
48141- atomic_set(&osb->alloc_stats.moves, 0);
48142- atomic_set(&osb->alloc_stats.local_data, 0);
48143- atomic_set(&osb->alloc_stats.bitmap_data, 0);
48144- atomic_set(&osb->alloc_stats.bg_allocs, 0);
48145- atomic_set(&osb->alloc_stats.bg_extends, 0);
48146+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
48147+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
48148+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
48149+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
48150+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
48151
48152 /* Copy the blockcheck stats from the superblock probe */
48153 osb->osb_ecc_stats = *stats;
48154diff --git a/fs/open.c b/fs/open.c
48155index 5d9c71b..adb5b19 100644
48156--- a/fs/open.c
48157+++ b/fs/open.c
48158@@ -31,6 +31,8 @@
48159 #include <linux/ima.h>
48160 #include <linux/dnotify.h>
48161
48162+#define CREATE_TRACE_POINTS
48163+#include <trace/events/fs.h>
48164 #include "internal.h"
48165
48166 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
48167@@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
48168 error = locks_verify_truncate(inode, NULL, length);
48169 if (!error)
48170 error = security_path_truncate(&path);
48171+
48172+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
48173+ error = -EACCES;
48174+
48175 if (!error)
48176 error = do_truncate(path.dentry, length, 0, NULL);
48177
48178@@ -359,6 +365,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
48179 if (__mnt_is_readonly(path.mnt))
48180 res = -EROFS;
48181
48182+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
48183+ res = -EACCES;
48184+
48185 out_path_release:
48186 path_put(&path);
48187 out:
48188@@ -385,6 +394,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
48189 if (error)
48190 goto dput_and_out;
48191
48192+ gr_log_chdir(path.dentry, path.mnt);
48193+
48194 set_fs_pwd(current->fs, &path);
48195
48196 dput_and_out:
48197@@ -411,6 +422,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
48198 goto out_putf;
48199
48200 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
48201+
48202+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
48203+ error = -EPERM;
48204+
48205+ if (!error)
48206+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
48207+
48208 if (!error)
48209 set_fs_pwd(current->fs, &file->f_path);
48210 out_putf:
48211@@ -439,7 +457,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
48212 if (error)
48213 goto dput_and_out;
48214
48215+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
48216+ goto dput_and_out;
48217+
48218 set_fs_root(current->fs, &path);
48219+
48220+ gr_handle_chroot_chdir(&path);
48221+
48222 error = 0;
48223 dput_and_out:
48224 path_put(&path);
48225@@ -457,6 +481,16 @@ static int chmod_common(struct path *path, umode_t mode)
48226 if (error)
48227 return error;
48228 mutex_lock(&inode->i_mutex);
48229+
48230+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
48231+ error = -EACCES;
48232+ goto out_unlock;
48233+ }
48234+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
48235+ error = -EACCES;
48236+ goto out_unlock;
48237+ }
48238+
48239 error = security_path_chmod(path, mode);
48240 if (error)
48241 goto out_unlock;
48242@@ -512,6 +546,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
48243 uid = make_kuid(current_user_ns(), user);
48244 gid = make_kgid(current_user_ns(), group);
48245
48246+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
48247+ return -EACCES;
48248+
48249 newattrs.ia_valid = ATTR_CTIME;
48250 if (user != (uid_t) -1) {
48251 if (!uid_valid(uid))
48252@@ -1036,6 +1073,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
48253 } else {
48254 fsnotify_open(f);
48255 fd_install(fd, f);
48256+ trace_do_sys_open(tmp, flags, mode);
48257 }
48258 }
48259 putname(tmp);
48260diff --git a/fs/pipe.c b/fs/pipe.c
48261index 49c1065..13b9e12 100644
48262--- a/fs/pipe.c
48263+++ b/fs/pipe.c
48264@@ -438,9 +438,9 @@ redo:
48265 }
48266 if (bufs) /* More to do? */
48267 continue;
48268- if (!pipe->writers)
48269+ if (!atomic_read(&pipe->writers))
48270 break;
48271- if (!pipe->waiting_writers) {
48272+ if (!atomic_read(&pipe->waiting_writers)) {
48273 /* syscall merging: Usually we must not sleep
48274 * if O_NONBLOCK is set, or if we got some data.
48275 * But if a writer sleeps in kernel space, then
48276@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
48277 mutex_lock(&inode->i_mutex);
48278 pipe = inode->i_pipe;
48279
48280- if (!pipe->readers) {
48281+ if (!atomic_read(&pipe->readers)) {
48282 send_sig(SIGPIPE, current, 0);
48283 ret = -EPIPE;
48284 goto out;
48285@@ -553,7 +553,7 @@ redo1:
48286 for (;;) {
48287 int bufs;
48288
48289- if (!pipe->readers) {
48290+ if (!atomic_read(&pipe->readers)) {
48291 send_sig(SIGPIPE, current, 0);
48292 if (!ret)
48293 ret = -EPIPE;
48294@@ -644,9 +644,9 @@ redo2:
48295 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48296 do_wakeup = 0;
48297 }
48298- pipe->waiting_writers++;
48299+ atomic_inc(&pipe->waiting_writers);
48300 pipe_wait(pipe);
48301- pipe->waiting_writers--;
48302+ atomic_dec(&pipe->waiting_writers);
48303 }
48304 out:
48305 mutex_unlock(&inode->i_mutex);
48306@@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
48307 mask = 0;
48308 if (filp->f_mode & FMODE_READ) {
48309 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
48310- if (!pipe->writers && filp->f_version != pipe->w_counter)
48311+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
48312 mask |= POLLHUP;
48313 }
48314
48315@@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
48316 * Most Unices do not set POLLERR for FIFOs but on Linux they
48317 * behave exactly like pipes for poll().
48318 */
48319- if (!pipe->readers)
48320+ if (!atomic_read(&pipe->readers))
48321 mask |= POLLERR;
48322 }
48323
48324@@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
48325
48326 mutex_lock(&inode->i_mutex);
48327 pipe = inode->i_pipe;
48328- pipe->readers -= decr;
48329- pipe->writers -= decw;
48330+ atomic_sub(decr, &pipe->readers);
48331+ atomic_sub(decw, &pipe->writers);
48332
48333- if (!pipe->readers && !pipe->writers) {
48334+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
48335 free_pipe_info(inode);
48336 } else {
48337 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
48338@@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
48339
48340 if (inode->i_pipe) {
48341 ret = 0;
48342- inode->i_pipe->readers++;
48343+ atomic_inc(&inode->i_pipe->readers);
48344 }
48345
48346 mutex_unlock(&inode->i_mutex);
48347@@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
48348
48349 if (inode->i_pipe) {
48350 ret = 0;
48351- inode->i_pipe->writers++;
48352+ atomic_inc(&inode->i_pipe->writers);
48353 }
48354
48355 mutex_unlock(&inode->i_mutex);
48356@@ -868,9 +868,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
48357 if (inode->i_pipe) {
48358 ret = 0;
48359 if (filp->f_mode & FMODE_READ)
48360- inode->i_pipe->readers++;
48361+ atomic_inc(&inode->i_pipe->readers);
48362 if (filp->f_mode & FMODE_WRITE)
48363- inode->i_pipe->writers++;
48364+ atomic_inc(&inode->i_pipe->writers);
48365 }
48366
48367 mutex_unlock(&inode->i_mutex);
48368@@ -962,7 +962,7 @@ void free_pipe_info(struct inode *inode)
48369 inode->i_pipe = NULL;
48370 }
48371
48372-static struct vfsmount *pipe_mnt __read_mostly;
48373+struct vfsmount *pipe_mnt __read_mostly;
48374
48375 /*
48376 * pipefs_dname() is called from d_path().
48377@@ -992,7 +992,8 @@ static struct inode * get_pipe_inode(void)
48378 goto fail_iput;
48379 inode->i_pipe = pipe;
48380
48381- pipe->readers = pipe->writers = 1;
48382+ atomic_set(&pipe->readers, 1);
48383+ atomic_set(&pipe->writers, 1);
48384 inode->i_fop = &rdwr_pipefifo_fops;
48385
48386 /*
48387diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
48388index 15af622..0e9f4467 100644
48389--- a/fs/proc/Kconfig
48390+++ b/fs/proc/Kconfig
48391@@ -30,12 +30,12 @@ config PROC_FS
48392
48393 config PROC_KCORE
48394 bool "/proc/kcore support" if !ARM
48395- depends on PROC_FS && MMU
48396+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
48397
48398 config PROC_VMCORE
48399 bool "/proc/vmcore support"
48400- depends on PROC_FS && CRASH_DUMP
48401- default y
48402+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
48403+ default n
48404 help
48405 Exports the dump image of crashed kernel in ELF format.
48406
48407@@ -59,8 +59,8 @@ config PROC_SYSCTL
48408 limited in memory.
48409
48410 config PROC_PAGE_MONITOR
48411- default y
48412- depends on PROC_FS && MMU
48413+ default n
48414+ depends on PROC_FS && MMU && !GRKERNSEC
48415 bool "Enable /proc page monitoring" if EXPERT
48416 help
48417 Various /proc files exist to monitor process memory utilization:
48418diff --git a/fs/proc/array.c b/fs/proc/array.c
48419index c1c207c..5179411 100644
48420--- a/fs/proc/array.c
48421+++ b/fs/proc/array.c
48422@@ -60,6 +60,7 @@
48423 #include <linux/tty.h>
48424 #include <linux/string.h>
48425 #include <linux/mman.h>
48426+#include <linux/grsecurity.h>
48427 #include <linux/proc_fs.h>
48428 #include <linux/ioport.h>
48429 #include <linux/uaccess.h>
48430@@ -346,6 +347,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
48431 seq_putc(m, '\n');
48432 }
48433
48434+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48435+static inline void task_pax(struct seq_file *m, struct task_struct *p)
48436+{
48437+ if (p->mm)
48438+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
48439+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
48440+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
48441+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
48442+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
48443+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
48444+ else
48445+ seq_printf(m, "PaX:\t-----\n");
48446+}
48447+#endif
48448+
48449 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48450 struct pid *pid, struct task_struct *task)
48451 {
48452@@ -363,9 +379,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48453 task_cpus_allowed(m, task);
48454 cpuset_task_status_allowed(m, task);
48455 task_context_switch_counts(m, task);
48456+
48457+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48458+ task_pax(m, task);
48459+#endif
48460+
48461+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
48462+ task_grsec_rbac(m, task);
48463+#endif
48464+
48465 return 0;
48466 }
48467
48468+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48469+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48470+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48471+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48472+#endif
48473+
48474 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48475 struct pid *pid, struct task_struct *task, int whole)
48476 {
48477@@ -387,6 +418,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48478 char tcomm[sizeof(task->comm)];
48479 unsigned long flags;
48480
48481+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48482+ if (current->exec_id != m->exec_id) {
48483+ gr_log_badprocpid("stat");
48484+ return 0;
48485+ }
48486+#endif
48487+
48488 state = *get_task_state(task);
48489 vsize = eip = esp = 0;
48490 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
48491@@ -458,6 +496,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48492 gtime = task->gtime;
48493 }
48494
48495+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48496+ if (PAX_RAND_FLAGS(mm)) {
48497+ eip = 0;
48498+ esp = 0;
48499+ wchan = 0;
48500+ }
48501+#endif
48502+#ifdef CONFIG_GRKERNSEC_HIDESYM
48503+ wchan = 0;
48504+ eip =0;
48505+ esp =0;
48506+#endif
48507+
48508 /* scale priority and nice values from timeslices to -20..20 */
48509 /* to make it look like a "normal" Unix priority/nice value */
48510 priority = task_prio(task);
48511@@ -494,9 +545,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48512 seq_put_decimal_ull(m, ' ', vsize);
48513 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
48514 seq_put_decimal_ull(m, ' ', rsslim);
48515+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48516+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
48517+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
48518+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
48519+#else
48520 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
48521 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
48522 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
48523+#endif
48524 seq_put_decimal_ull(m, ' ', esp);
48525 seq_put_decimal_ull(m, ' ', eip);
48526 /* The signal information here is obsolete.
48527@@ -518,7 +575,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48528 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
48529 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
48530
48531- if (mm && permitted) {
48532+ if (mm && permitted
48533+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48534+ && !PAX_RAND_FLAGS(mm)
48535+#endif
48536+ ) {
48537 seq_put_decimal_ull(m, ' ', mm->start_data);
48538 seq_put_decimal_ull(m, ' ', mm->end_data);
48539 seq_put_decimal_ull(m, ' ', mm->start_brk);
48540@@ -556,8 +617,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48541 struct pid *pid, struct task_struct *task)
48542 {
48543 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
48544- struct mm_struct *mm = get_task_mm(task);
48545+ struct mm_struct *mm;
48546
48547+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48548+ if (current->exec_id != m->exec_id) {
48549+ gr_log_badprocpid("statm");
48550+ return 0;
48551+ }
48552+#endif
48553+ mm = get_task_mm(task);
48554 if (mm) {
48555 size = task_statm(mm, &shared, &text, &data, &resident);
48556 mmput(mm);
48557@@ -580,6 +648,21 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48558 return 0;
48559 }
48560
48561+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48562+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
48563+{
48564+ u32 curr_ip = 0;
48565+ unsigned long flags;
48566+
48567+ if (lock_task_sighand(task, &flags)) {
48568+ curr_ip = task->signal->curr_ip;
48569+ unlock_task_sighand(task, &flags);
48570+ }
48571+
48572+ return sprintf(buffer, "%pI4\n", &curr_ip);
48573+}
48574+#endif
48575+
48576 #ifdef CONFIG_CHECKPOINT_RESTORE
48577 static struct pid *
48578 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
48579diff --git a/fs/proc/base.c b/fs/proc/base.c
48580index 437195f..cd2210d 100644
48581--- a/fs/proc/base.c
48582+++ b/fs/proc/base.c
48583@@ -110,6 +110,14 @@ struct pid_entry {
48584 union proc_op op;
48585 };
48586
48587+struct getdents_callback {
48588+ struct linux_dirent __user * current_dir;
48589+ struct linux_dirent __user * previous;
48590+ struct file * file;
48591+ int count;
48592+ int error;
48593+};
48594+
48595 #define NOD(NAME, MODE, IOP, FOP, OP) { \
48596 .name = (NAME), \
48597 .len = sizeof(NAME) - 1, \
48598@@ -209,6 +217,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
48599 if (!mm->arg_end)
48600 goto out_mm; /* Shh! No looking before we're done */
48601
48602+ if (gr_acl_handle_procpidmem(task))
48603+ goto out_mm;
48604+
48605 len = mm->arg_end - mm->arg_start;
48606
48607 if (len > PAGE_SIZE)
48608@@ -236,12 +247,28 @@ out:
48609 return res;
48610 }
48611
48612+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48613+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48614+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48615+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48616+#endif
48617+
48618 static int proc_pid_auxv(struct task_struct *task, char *buffer)
48619 {
48620 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
48621 int res = PTR_ERR(mm);
48622 if (mm && !IS_ERR(mm)) {
48623 unsigned int nwords = 0;
48624+
48625+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48626+ /* allow if we're currently ptracing this task */
48627+ if (PAX_RAND_FLAGS(mm) &&
48628+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
48629+ mmput(mm);
48630+ return 0;
48631+ }
48632+#endif
48633+
48634 do {
48635 nwords += 2;
48636 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
48637@@ -255,7 +282,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
48638 }
48639
48640
48641-#ifdef CONFIG_KALLSYMS
48642+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48643 /*
48644 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
48645 * Returns the resolved symbol. If that fails, simply return the address.
48646@@ -294,7 +321,7 @@ static void unlock_trace(struct task_struct *task)
48647 mutex_unlock(&task->signal->cred_guard_mutex);
48648 }
48649
48650-#ifdef CONFIG_STACKTRACE
48651+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48652
48653 #define MAX_STACK_TRACE_DEPTH 64
48654
48655@@ -486,7 +513,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
48656 return count;
48657 }
48658
48659-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48660+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48661 static int proc_pid_syscall(struct task_struct *task, char *buffer)
48662 {
48663 long nr;
48664@@ -515,7 +542,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
48665 /************************************************************************/
48666
48667 /* permission checks */
48668-static int proc_fd_access_allowed(struct inode *inode)
48669+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
48670 {
48671 struct task_struct *task;
48672 int allowed = 0;
48673@@ -525,7 +552,10 @@ static int proc_fd_access_allowed(struct inode *inode)
48674 */
48675 task = get_proc_task(inode);
48676 if (task) {
48677- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48678+ if (log)
48679+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48680+ else
48681+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
48682 put_task_struct(task);
48683 }
48684 return allowed;
48685@@ -563,10 +593,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
48686 struct task_struct *task,
48687 int hide_pid_min)
48688 {
48689+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48690+ return false;
48691+
48692+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48693+ rcu_read_lock();
48694+ {
48695+ const struct cred *tmpcred = current_cred();
48696+ const struct cred *cred = __task_cred(task);
48697+
48698+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
48699+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48700+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48701+#endif
48702+ ) {
48703+ rcu_read_unlock();
48704+ return true;
48705+ }
48706+ }
48707+ rcu_read_unlock();
48708+
48709+ if (!pid->hide_pid)
48710+ return false;
48711+#endif
48712+
48713 if (pid->hide_pid < hide_pid_min)
48714 return true;
48715 if (in_group_p(pid->pid_gid))
48716 return true;
48717+
48718 return ptrace_may_access(task, PTRACE_MODE_READ);
48719 }
48720
48721@@ -584,7 +639,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
48722 put_task_struct(task);
48723
48724 if (!has_perms) {
48725+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48726+ {
48727+#else
48728 if (pid->hide_pid == 2) {
48729+#endif
48730 /*
48731 * Let's make getdents(), stat(), and open()
48732 * consistent with each other. If a process
48733@@ -682,6 +741,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
48734 if (!task)
48735 return -ESRCH;
48736
48737+ if (gr_acl_handle_procpidmem(task)) {
48738+ put_task_struct(task);
48739+ return -EPERM;
48740+ }
48741+
48742 mm = mm_access(task, mode);
48743 put_task_struct(task);
48744
48745@@ -695,16 +759,24 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
48746 mmput(mm);
48747 }
48748
48749- /* OK to pass negative loff_t, we can catch out-of-range */
48750- file->f_mode |= FMODE_UNSIGNED_OFFSET;
48751 file->private_data = mm;
48752
48753+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48754+ file->f_version = current->exec_id;
48755+#endif
48756+
48757 return 0;
48758 }
48759
48760 static int mem_open(struct inode *inode, struct file *file)
48761 {
48762- return __mem_open(inode, file, PTRACE_MODE_ATTACH);
48763+ int ret;
48764+ ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);
48765+
48766+ /* OK to pass negative loff_t, we can catch out-of-range */
48767+ file->f_mode |= FMODE_UNSIGNED_OFFSET;
48768+
48769+ return ret;
48770 }
48771
48772 static ssize_t mem_rw(struct file *file, char __user *buf,
48773@@ -715,6 +787,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
48774 ssize_t copied;
48775 char *page;
48776
48777+#ifdef CONFIG_GRKERNSEC
48778+ if (write)
48779+ return -EPERM;
48780+#endif
48781+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48782+ if (file->f_version != current->exec_id) {
48783+ gr_log_badprocpid("mem");
48784+ return 0;
48785+ }
48786+#endif
48787+
48788 if (!mm)
48789 return 0;
48790
48791@@ -819,6 +902,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
48792 if (!mm)
48793 return 0;
48794
48795+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48796+ if (file->f_version != current->exec_id) {
48797+ gr_log_badprocpid("environ");
48798+ return 0;
48799+ }
48800+#endif
48801+
48802 page = (char *)__get_free_page(GFP_TEMPORARY);
48803 if (!page)
48804 return -ENOMEM;
48805@@ -827,15 +917,17 @@ static ssize_t environ_read(struct file *file, char __user *buf,
48806 if (!atomic_inc_not_zero(&mm->mm_users))
48807 goto free;
48808 while (count > 0) {
48809- int this_len, retval, max_len;
48810+ size_t this_len, max_len;
48811+ int retval;
48812+
48813+ if (src >= (mm->env_end - mm->env_start))
48814+ break;
48815
48816 this_len = mm->env_end - (mm->env_start + src);
48817
48818- if (this_len <= 0)
48819- break;
48820
48821- max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
48822- this_len = (this_len > max_len) ? max_len : this_len;
48823+ max_len = min_t(size_t, PAGE_SIZE, count);
48824+ this_len = min(max_len, this_len);
48825
48826 retval = access_remote_vm(mm, (mm->env_start + src),
48827 page, this_len, 0);
48828@@ -1433,7 +1525,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
48829 path_put(&nd->path);
48830
48831 /* Are we allowed to snoop on the tasks file descriptors? */
48832- if (!proc_fd_access_allowed(inode))
48833+ if (!proc_fd_access_allowed(inode, 0))
48834 goto out;
48835
48836 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
48837@@ -1472,8 +1564,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
48838 struct path path;
48839
48840 /* Are we allowed to snoop on the tasks file descriptors? */
48841- if (!proc_fd_access_allowed(inode))
48842- goto out;
48843+ /* logging this is needed for learning on chromium to work properly,
48844+ but we don't want to flood the logs from 'ps' which does a readlink
48845+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
48846+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
48847+ */
48848+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48849+ if (!proc_fd_access_allowed(inode,0))
48850+ goto out;
48851+ } else {
48852+ if (!proc_fd_access_allowed(inode,1))
48853+ goto out;
48854+ }
48855
48856 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
48857 if (error)
48858@@ -1538,7 +1640,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
48859 rcu_read_lock();
48860 cred = __task_cred(task);
48861 inode->i_uid = cred->euid;
48862+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48863+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48864+#else
48865 inode->i_gid = cred->egid;
48866+#endif
48867 rcu_read_unlock();
48868 }
48869 security_task_to_inode(task, inode);
48870@@ -1574,10 +1680,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48871 return -ENOENT;
48872 }
48873 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48874+#ifdef CONFIG_GRKERNSEC_PROC_USER
48875+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48876+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48877+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48878+#endif
48879 task_dumpable(task)) {
48880 cred = __task_cred(task);
48881 stat->uid = cred->euid;
48882+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48883+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48884+#else
48885 stat->gid = cred->egid;
48886+#endif
48887 }
48888 }
48889 rcu_read_unlock();
48890@@ -1615,11 +1730,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
48891
48892 if (task) {
48893 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48894+#ifdef CONFIG_GRKERNSEC_PROC_USER
48895+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48896+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48897+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48898+#endif
48899 task_dumpable(task)) {
48900 rcu_read_lock();
48901 cred = __task_cred(task);
48902 inode->i_uid = cred->euid;
48903+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48904+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48905+#else
48906 inode->i_gid = cred->egid;
48907+#endif
48908 rcu_read_unlock();
48909 } else {
48910 inode->i_uid = GLOBAL_ROOT_UID;
48911@@ -1737,7 +1861,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
48912 int fd = proc_fd(inode);
48913
48914 if (task) {
48915- files = get_files_struct(task);
48916+ if (!gr_acl_handle_procpidmem(task))
48917+ files = get_files_struct(task);
48918 put_task_struct(task);
48919 }
48920 if (files) {
48921@@ -2336,11 +2461,21 @@ static const struct file_operations proc_map_files_operations = {
48922 */
48923 static int proc_fd_permission(struct inode *inode, int mask)
48924 {
48925+ struct task_struct *task;
48926 int rv = generic_permission(inode, mask);
48927- if (rv == 0)
48928- return 0;
48929+
48930 if (task_pid(current) == proc_pid(inode))
48931 rv = 0;
48932+
48933+ task = get_proc_task(inode);
48934+ if (task == NULL)
48935+ return rv;
48936+
48937+ if (gr_acl_handle_procpidmem(task))
48938+ rv = -EACCES;
48939+
48940+ put_task_struct(task);
48941+
48942 return rv;
48943 }
48944
48945@@ -2450,6 +2585,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
48946 if (!task)
48947 goto out_no_task;
48948
48949+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48950+ goto out;
48951+
48952 /*
48953 * Yes, it does not scale. And it should not. Don't add
48954 * new entries into /proc/<tgid>/ without very good reasons.
48955@@ -2494,6 +2632,9 @@ static int proc_pident_readdir(struct file *filp,
48956 if (!task)
48957 goto out_no_task;
48958
48959+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48960+ goto out;
48961+
48962 ret = 0;
48963 i = filp->f_pos;
48964 switch (i) {
48965@@ -2764,7 +2905,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
48966 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48967 void *cookie)
48968 {
48969- char *s = nd_get_link(nd);
48970+ const char *s = nd_get_link(nd);
48971 if (!IS_ERR(s))
48972 __putname(s);
48973 }
48974@@ -3033,7 +3174,7 @@ static const struct pid_entry tgid_base_stuff[] = {
48975 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
48976 #endif
48977 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48978-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48979+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48980 INF("syscall", S_IRUGO, proc_pid_syscall),
48981 #endif
48982 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48983@@ -3058,10 +3199,10 @@ static const struct pid_entry tgid_base_stuff[] = {
48984 #ifdef CONFIG_SECURITY
48985 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48986 #endif
48987-#ifdef CONFIG_KALLSYMS
48988+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48989 INF("wchan", S_IRUGO, proc_pid_wchan),
48990 #endif
48991-#ifdef CONFIG_STACKTRACE
48992+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48993 ONE("stack", S_IRUGO, proc_pid_stack),
48994 #endif
48995 #ifdef CONFIG_SCHEDSTATS
48996@@ -3095,6 +3236,9 @@ static const struct pid_entry tgid_base_stuff[] = {
48997 #ifdef CONFIG_HARDWALL
48998 INF("hardwall", S_IRUGO, proc_pid_hardwall),
48999 #endif
49000+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
49001+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
49002+#endif
49003 #ifdef CONFIG_USER_NS
49004 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
49005 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
49006@@ -3225,7 +3369,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
49007 if (!inode)
49008 goto out;
49009
49010+#ifdef CONFIG_GRKERNSEC_PROC_USER
49011+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
49012+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49013+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49014+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
49015+#else
49016 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
49017+#endif
49018 inode->i_op = &proc_tgid_base_inode_operations;
49019 inode->i_fop = &proc_tgid_base_operations;
49020 inode->i_flags|=S_IMMUTABLE;
49021@@ -3267,7 +3418,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
49022 if (!task)
49023 goto out;
49024
49025+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
49026+ goto out_put_task;
49027+
49028 result = proc_pid_instantiate(dir, dentry, task, NULL);
49029+out_put_task:
49030 put_task_struct(task);
49031 out:
49032 return result;
49033@@ -3330,6 +3485,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
49034 static int fake_filldir(void *buf, const char *name, int namelen,
49035 loff_t offset, u64 ino, unsigned d_type)
49036 {
49037+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
49038+ __buf->error = -EINVAL;
49039 return 0;
49040 }
49041
49042@@ -3396,7 +3553,7 @@ static const struct pid_entry tid_base_stuff[] = {
49043 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
49044 #endif
49045 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
49046-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
49047+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
49048 INF("syscall", S_IRUGO, proc_pid_syscall),
49049 #endif
49050 INF("cmdline", S_IRUGO, proc_pid_cmdline),
49051@@ -3423,10 +3580,10 @@ static const struct pid_entry tid_base_stuff[] = {
49052 #ifdef CONFIG_SECURITY
49053 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
49054 #endif
49055-#ifdef CONFIG_KALLSYMS
49056+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49057 INF("wchan", S_IRUGO, proc_pid_wchan),
49058 #endif
49059-#ifdef CONFIG_STACKTRACE
49060+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
49061 ONE("stack", S_IRUGO, proc_pid_stack),
49062 #endif
49063 #ifdef CONFIG_SCHEDSTATS
49064diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
49065index 82676e3..5f8518a 100644
49066--- a/fs/proc/cmdline.c
49067+++ b/fs/proc/cmdline.c
49068@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
49069
49070 static int __init proc_cmdline_init(void)
49071 {
49072+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49073+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
49074+#else
49075 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
49076+#endif
49077 return 0;
49078 }
49079 module_init(proc_cmdline_init);
49080diff --git a/fs/proc/devices.c b/fs/proc/devices.c
49081index b143471..bb105e5 100644
49082--- a/fs/proc/devices.c
49083+++ b/fs/proc/devices.c
49084@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
49085
49086 static int __init proc_devices_init(void)
49087 {
49088+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49089+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
49090+#else
49091 proc_create("devices", 0, NULL, &proc_devinfo_operations);
49092+#endif
49093 return 0;
49094 }
49095 module_init(proc_devices_init);
49096diff --git a/fs/proc/inode.c b/fs/proc/inode.c
49097index 7ac817b..abab1a5 100644
49098--- a/fs/proc/inode.c
49099+++ b/fs/proc/inode.c
49100@@ -21,11 +21,17 @@
49101 #include <linux/seq_file.h>
49102 #include <linux/slab.h>
49103 #include <linux/mount.h>
49104+#include <linux/grsecurity.h>
49105
49106 #include <asm/uaccess.h>
49107
49108 #include "internal.h"
49109
49110+#ifdef CONFIG_PROC_SYSCTL
49111+extern const struct inode_operations proc_sys_inode_operations;
49112+extern const struct inode_operations proc_sys_dir_operations;
49113+#endif
49114+
49115 static void proc_evict_inode(struct inode *inode)
49116 {
49117 struct proc_dir_entry *de;
49118@@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
49119 ns_ops = PROC_I(inode)->ns_ops;
49120 if (ns_ops && ns_ops->put)
49121 ns_ops->put(PROC_I(inode)->ns);
49122+
49123+#ifdef CONFIG_PROC_SYSCTL
49124+ if (inode->i_op == &proc_sys_inode_operations ||
49125+ inode->i_op == &proc_sys_dir_operations)
49126+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
49127+#endif
49128+
49129 }
49130
49131 static struct kmem_cache * proc_inode_cachep;
49132@@ -456,7 +469,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
49133 if (de->mode) {
49134 inode->i_mode = de->mode;
49135 inode->i_uid = de->uid;
49136+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
49137+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
49138+#else
49139 inode->i_gid = de->gid;
49140+#endif
49141 }
49142 if (de->size)
49143 inode->i_size = de->size;
49144diff --git a/fs/proc/internal.h b/fs/proc/internal.h
49145index eca4aca..19166b2 100644
49146--- a/fs/proc/internal.h
49147+++ b/fs/proc/internal.h
49148@@ -52,6 +52,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
49149 struct pid *pid, struct task_struct *task);
49150 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
49151 struct pid *pid, struct task_struct *task);
49152+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
49153+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
49154+#endif
49155 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
49156
49157 extern const struct file_operations proc_tid_children_operations;
49158diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
49159index 86c67ee..cdca321 100644
49160--- a/fs/proc/kcore.c
49161+++ b/fs/proc/kcore.c
49162@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49163 * the addresses in the elf_phdr on our list.
49164 */
49165 start = kc_offset_to_vaddr(*fpos - elf_buflen);
49166- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
49167+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
49168+ if (tsz > buflen)
49169 tsz = buflen;
49170-
49171+
49172 while (buflen) {
49173 struct kcore_list *m;
49174
49175@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49176 kfree(elf_buf);
49177 } else {
49178 if (kern_addr_valid(start)) {
49179- unsigned long n;
49180+ char *elf_buf;
49181+ mm_segment_t oldfs;
49182
49183- n = copy_to_user(buffer, (char *)start, tsz);
49184- /*
49185- * We cannot distinguish between fault on source
49186- * and fault on destination. When this happens
49187- * we clear too and hope it will trigger the
49188- * EFAULT again.
49189- */
49190- if (n) {
49191- if (clear_user(buffer + tsz - n,
49192- n))
49193+ elf_buf = kmalloc(tsz, GFP_KERNEL);
49194+ if (!elf_buf)
49195+ return -ENOMEM;
49196+ oldfs = get_fs();
49197+ set_fs(KERNEL_DS);
49198+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
49199+ set_fs(oldfs);
49200+ if (copy_to_user(buffer, elf_buf, tsz)) {
49201+ kfree(elf_buf);
49202 return -EFAULT;
49203+ }
49204 }
49205+ set_fs(oldfs);
49206+ kfree(elf_buf);
49207 } else {
49208 if (clear_user(buffer, tsz))
49209 return -EFAULT;
49210@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
49211
49212 static int open_kcore(struct inode *inode, struct file *filp)
49213 {
49214+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
49215+ return -EPERM;
49216+#endif
49217 if (!capable(CAP_SYS_RAWIO))
49218 return -EPERM;
49219 if (kcore_need_update)
49220diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
49221index 80e4645..53e5fcf 100644
49222--- a/fs/proc/meminfo.c
49223+++ b/fs/proc/meminfo.c
49224@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
49225 vmi.used >> 10,
49226 vmi.largest_chunk >> 10
49227 #ifdef CONFIG_MEMORY_FAILURE
49228- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
49229+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
49230 #endif
49231 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
49232 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
49233diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
49234index b1822dd..df622cb 100644
49235--- a/fs/proc/nommu.c
49236+++ b/fs/proc/nommu.c
49237@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
49238 if (len < 1)
49239 len = 1;
49240 seq_printf(m, "%*c", len, ' ');
49241- seq_path(m, &file->f_path, "");
49242+ seq_path(m, &file->f_path, "\n\\");
49243 }
49244
49245 seq_putc(m, '\n');
49246diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
49247index 06e1cc1..177cd98 100644
49248--- a/fs/proc/proc_net.c
49249+++ b/fs/proc/proc_net.c
49250@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
49251 struct task_struct *task;
49252 struct nsproxy *ns;
49253 struct net *net = NULL;
49254+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49255+ const struct cred *cred = current_cred();
49256+#endif
49257+
49258+#ifdef CONFIG_GRKERNSEC_PROC_USER
49259+ if (cred->fsuid)
49260+ return net;
49261+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49262+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
49263+ return net;
49264+#endif
49265
49266 rcu_read_lock();
49267 task = pid_task(proc_pid(dir), PIDTYPE_PID);
49268diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
49269index 3476bca..cb6d86a 100644
49270--- a/fs/proc/proc_sysctl.c
49271+++ b/fs/proc/proc_sysctl.c
49272@@ -12,11 +12,15 @@
49273 #include <linux/module.h>
49274 #include "internal.h"
49275
49276+extern int gr_handle_chroot_sysctl(const int op);
49277+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
49278+ const int op);
49279+
49280 static const struct dentry_operations proc_sys_dentry_operations;
49281 static const struct file_operations proc_sys_file_operations;
49282-static const struct inode_operations proc_sys_inode_operations;
49283+const struct inode_operations proc_sys_inode_operations;
49284 static const struct file_operations proc_sys_dir_file_operations;
49285-static const struct inode_operations proc_sys_dir_operations;
49286+const struct inode_operations proc_sys_dir_operations;
49287
49288 void proc_sys_poll_notify(struct ctl_table_poll *poll)
49289 {
49290@@ -470,8 +474,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
49291
49292 err = NULL;
49293 d_set_d_op(dentry, &proc_sys_dentry_operations);
49294+
49295+ gr_handle_proc_create(dentry, inode);
49296+
49297 d_add(dentry, inode);
49298
49299+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt))
49300+ err = ERR_PTR(-ENOENT);
49301+
49302 out:
49303 sysctl_head_finish(head);
49304 return err;
49305@@ -483,18 +493,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
49306 struct inode *inode = filp->f_path.dentry->d_inode;
49307 struct ctl_table_header *head = grab_header(inode);
49308 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
49309+ int op = write ? MAY_WRITE : MAY_READ;
49310 ssize_t error;
49311 size_t res;
49312
49313 if (IS_ERR(head))
49314 return PTR_ERR(head);
49315
49316+
49317 /*
49318 * At this point we know that the sysctl was not unregistered
49319 * and won't be until we finish.
49320 */
49321 error = -EPERM;
49322- if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
49323+ if (sysctl_perm(head->root, table, op))
49324 goto out;
49325
49326 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
49327@@ -502,6 +514,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
49328 if (!table->proc_handler)
49329 goto out;
49330
49331+#ifdef CONFIG_GRKERNSEC
49332+ error = -EPERM;
49333+ if (gr_handle_chroot_sysctl(op))
49334+ goto out;
49335+ dget(filp->f_path.dentry);
49336+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
49337+ dput(filp->f_path.dentry);
49338+ goto out;
49339+ }
49340+ dput(filp->f_path.dentry);
49341+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
49342+ goto out;
49343+ if (write && !capable(CAP_SYS_ADMIN))
49344+ goto out;
49345+#endif
49346+
49347 /* careful: calling conventions are nasty here */
49348 res = count;
49349 error = table->proc_handler(table, write, buf, &res, ppos);
49350@@ -599,6 +627,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
49351 return -ENOMEM;
49352 } else {
49353 d_set_d_op(child, &proc_sys_dentry_operations);
49354+
49355+ gr_handle_proc_create(child, inode);
49356+
49357 d_add(child, inode);
49358 }
49359 } else {
49360@@ -642,6 +673,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
49361 if ((*pos)++ < file->f_pos)
49362 return 0;
49363
49364+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
49365+ return 0;
49366+
49367 if (unlikely(S_ISLNK(table->mode)))
49368 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
49369 else
49370@@ -759,6 +793,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
49371 if (IS_ERR(head))
49372 return PTR_ERR(head);
49373
49374+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
49375+ return -ENOENT;
49376+
49377 generic_fillattr(inode, stat);
49378 if (table)
49379 stat->mode = (stat->mode & S_IFMT) | table->mode;
49380@@ -781,13 +818,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
49381 .llseek = generic_file_llseek,
49382 };
49383
49384-static const struct inode_operations proc_sys_inode_operations = {
49385+const struct inode_operations proc_sys_inode_operations = {
49386 .permission = proc_sys_permission,
49387 .setattr = proc_sys_setattr,
49388 .getattr = proc_sys_getattr,
49389 };
49390
49391-static const struct inode_operations proc_sys_dir_operations = {
49392+const struct inode_operations proc_sys_dir_operations = {
49393 .lookup = proc_sys_lookup,
49394 .permission = proc_sys_permission,
49395 .setattr = proc_sys_setattr,
49396diff --git a/fs/proc/root.c b/fs/proc/root.c
49397index 7c30fce..b3d3aa2 100644
49398--- a/fs/proc/root.c
49399+++ b/fs/proc/root.c
49400@@ -188,7 +188,15 @@ void __init proc_root_init(void)
49401 #ifdef CONFIG_PROC_DEVICETREE
49402 proc_device_tree_init();
49403 #endif
49404+#ifdef CONFIG_GRKERNSEC_PROC_ADD
49405+#ifdef CONFIG_GRKERNSEC_PROC_USER
49406+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
49407+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49408+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
49409+#endif
49410+#else
49411 proc_mkdir("bus", NULL);
49412+#endif
49413 proc_sys_init();
49414 }
49415
49416diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
49417index 4540b8f..1b9772f 100644
49418--- a/fs/proc/task_mmu.c
49419+++ b/fs/proc/task_mmu.c
49420@@ -11,12 +11,19 @@
49421 #include <linux/rmap.h>
49422 #include <linux/swap.h>
49423 #include <linux/swapops.h>
49424+#include <linux/grsecurity.h>
49425
49426 #include <asm/elf.h>
49427 #include <asm/uaccess.h>
49428 #include <asm/tlbflush.h>
49429 #include "internal.h"
49430
49431+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49432+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
49433+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
49434+ _mm->pax_flags & MF_PAX_SEGMEXEC))
49435+#endif
49436+
49437 void task_mem(struct seq_file *m, struct mm_struct *mm)
49438 {
49439 unsigned long data, text, lib, swap;
49440@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49441 "VmExe:\t%8lu kB\n"
49442 "VmLib:\t%8lu kB\n"
49443 "VmPTE:\t%8lu kB\n"
49444- "VmSwap:\t%8lu kB\n",
49445- hiwater_vm << (PAGE_SHIFT-10),
49446+ "VmSwap:\t%8lu kB\n"
49447+
49448+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49449+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
49450+#endif
49451+
49452+ ,hiwater_vm << (PAGE_SHIFT-10),
49453 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
49454 mm->locked_vm << (PAGE_SHIFT-10),
49455 mm->pinned_vm << (PAGE_SHIFT-10),
49456@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49457 data << (PAGE_SHIFT-10),
49458 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
49459 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
49460- swap << (PAGE_SHIFT-10));
49461+ swap << (PAGE_SHIFT-10)
49462+
49463+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49464+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49465+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
49466+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
49467+#else
49468+ , mm->context.user_cs_base
49469+ , mm->context.user_cs_limit
49470+#endif
49471+#endif
49472+
49473+ );
49474 }
49475
49476 unsigned long task_vsize(struct mm_struct *mm)
49477@@ -231,13 +255,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
49478 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
49479 }
49480
49481- /* We don't show the stack guard page in /proc/maps */
49482+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49483+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
49484+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
49485+#else
49486 start = vma->vm_start;
49487- if (stack_guard_page_start(vma, start))
49488- start += PAGE_SIZE;
49489 end = vma->vm_end;
49490- if (stack_guard_page_end(vma, end))
49491- end -= PAGE_SIZE;
49492+#endif
49493
49494 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
49495 start,
49496@@ -246,7 +270,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
49497 flags & VM_WRITE ? 'w' : '-',
49498 flags & VM_EXEC ? 'x' : '-',
49499 flags & VM_MAYSHARE ? 's' : 'p',
49500+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49501+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
49502+#else
49503 pgoff,
49504+#endif
49505 MAJOR(dev), MINOR(dev), ino, &len);
49506
49507 /*
49508@@ -255,7 +283,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
49509 */
49510 if (file) {
49511 pad_len_spaces(m, len);
49512- seq_path(m, &file->f_path, "\n");
49513+ seq_path(m, &file->f_path, "\n\\");
49514 goto done;
49515 }
49516
49517@@ -281,8 +309,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
49518 * Thread stack in /proc/PID/task/TID/maps or
49519 * the main process stack.
49520 */
49521- if (!is_pid || (vma->vm_start <= mm->start_stack &&
49522- vma->vm_end >= mm->start_stack)) {
49523+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
49524+ (vma->vm_start <= mm->start_stack &&
49525+ vma->vm_end >= mm->start_stack)) {
49526 name = "[stack]";
49527 } else {
49528 /* Thread stack in /proc/PID/maps */
49529@@ -306,6 +335,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
49530 struct proc_maps_private *priv = m->private;
49531 struct task_struct *task = priv->task;
49532
49533+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49534+ if (current->exec_id != m->exec_id) {
49535+ gr_log_badprocpid("maps");
49536+ return 0;
49537+ }
49538+#endif
49539+
49540 show_map_vma(m, vma, is_pid);
49541
49542 if (m->count < m->size) /* vma is copied successfully */
49543@@ -492,12 +528,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
49544 .private = &mss,
49545 };
49546
49547+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49548+ if (current->exec_id != m->exec_id) {
49549+ gr_log_badprocpid("smaps");
49550+ return 0;
49551+ }
49552+#endif
49553 memset(&mss, 0, sizeof mss);
49554- mss.vma = vma;
49555- /* mmap_sem is held in m_start */
49556- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49557- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49558-
49559+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49560+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
49561+#endif
49562+ mss.vma = vma;
49563+ /* mmap_sem is held in m_start */
49564+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49565+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49566+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49567+ }
49568+#endif
49569 show_map_vma(m, vma, is_pid);
49570
49571 seq_printf(m,
49572@@ -515,7 +562,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
49573 "KernelPageSize: %8lu kB\n"
49574 "MMUPageSize: %8lu kB\n"
49575 "Locked: %8lu kB\n",
49576+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49577+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
49578+#else
49579 (vma->vm_end - vma->vm_start) >> 10,
49580+#endif
49581 mss.resident >> 10,
49582 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
49583 mss.shared_clean >> 10,
49584@@ -1164,6 +1215,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
49585 int n;
49586 char buffer[50];
49587
49588+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49589+ if (current->exec_id != m->exec_id) {
49590+ gr_log_badprocpid("numa_maps");
49591+ return 0;
49592+ }
49593+#endif
49594+
49595 if (!mm)
49596 return 0;
49597
49598@@ -1181,11 +1239,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
49599 mpol_to_str(buffer, sizeof(buffer), pol, 0);
49600 mpol_cond_put(pol);
49601
49602+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49603+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
49604+#else
49605 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
49606+#endif
49607
49608 if (file) {
49609 seq_printf(m, " file=");
49610- seq_path(m, &file->f_path, "\n\t= ");
49611+ seq_path(m, &file->f_path, "\n\t\\= ");
49612 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
49613 seq_printf(m, " heap");
49614 } else {
49615diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
49616index 1ccfa53..0848f95 100644
49617--- a/fs/proc/task_nommu.c
49618+++ b/fs/proc/task_nommu.c
49619@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49620 else
49621 bytes += kobjsize(mm);
49622
49623- if (current->fs && current->fs->users > 1)
49624+ if (current->fs && atomic_read(&current->fs->users) > 1)
49625 sbytes += kobjsize(current->fs);
49626 else
49627 bytes += kobjsize(current->fs);
49628@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
49629
49630 if (file) {
49631 pad_len_spaces(m, len);
49632- seq_path(m, &file->f_path, "");
49633+ seq_path(m, &file->f_path, "\n\\");
49634 } else if (mm) {
49635 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
49636
49637diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
49638index d67908b..d13f6a6 100644
49639--- a/fs/quota/netlink.c
49640+++ b/fs/quota/netlink.c
49641@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
49642 void quota_send_warning(short type, unsigned int id, dev_t dev,
49643 const char warntype)
49644 {
49645- static atomic_t seq;
49646+ static atomic_unchecked_t seq;
49647 struct sk_buff *skb;
49648 void *msg_head;
49649 int ret;
49650@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
49651 "VFS: Not enough memory to send quota warning.\n");
49652 return;
49653 }
49654- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
49655+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
49656 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
49657 if (!msg_head) {
49658 printk(KERN_ERR
49659diff --git a/fs/readdir.c b/fs/readdir.c
49660index 39e3370..20d446d 100644
49661--- a/fs/readdir.c
49662+++ b/fs/readdir.c
49663@@ -17,6 +17,7 @@
49664 #include <linux/security.h>
49665 #include <linux/syscalls.h>
49666 #include <linux/unistd.h>
49667+#include <linux/namei.h>
49668
49669 #include <asm/uaccess.h>
49670
49671@@ -67,6 +68,7 @@ struct old_linux_dirent {
49672
49673 struct readdir_callback {
49674 struct old_linux_dirent __user * dirent;
49675+ struct file * file;
49676 int result;
49677 };
49678
49679@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
49680 buf->result = -EOVERFLOW;
49681 return -EOVERFLOW;
49682 }
49683+
49684+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49685+ return 0;
49686+
49687 buf->result++;
49688 dirent = buf->dirent;
49689 if (!access_ok(VERIFY_WRITE, dirent,
49690@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
49691
49692 buf.result = 0;
49693 buf.dirent = dirent;
49694+ buf.file = file;
49695
49696 error = vfs_readdir(file, fillonedir, &buf);
49697 if (buf.result)
49698@@ -141,6 +148,7 @@ struct linux_dirent {
49699 struct getdents_callback {
49700 struct linux_dirent __user * current_dir;
49701 struct linux_dirent __user * previous;
49702+ struct file * file;
49703 int count;
49704 int error;
49705 };
49706@@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
49707 buf->error = -EOVERFLOW;
49708 return -EOVERFLOW;
49709 }
49710+
49711+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49712+ return 0;
49713+
49714 dirent = buf->previous;
49715 if (dirent) {
49716 if (__put_user(offset, &dirent->d_off))
49717@@ -208,6 +220,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
49718 buf.previous = NULL;
49719 buf.count = count;
49720 buf.error = 0;
49721+ buf.file = file;
49722
49723 error = vfs_readdir(file, filldir, &buf);
49724 if (error >= 0)
49725@@ -226,6 +239,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
49726 struct getdents_callback64 {
49727 struct linux_dirent64 __user * current_dir;
49728 struct linux_dirent64 __user * previous;
49729+ struct file *file;
49730 int count;
49731 int error;
49732 };
49733@@ -241,6 +255,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
49734 buf->error = -EINVAL; /* only used if we fail.. */
49735 if (reclen > buf->count)
49736 return -EINVAL;
49737+
49738+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49739+ return 0;
49740+
49741 dirent = buf->previous;
49742 if (dirent) {
49743 if (__put_user(offset, &dirent->d_off))
49744@@ -287,6 +305,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49745
49746 buf.current_dir = dirent;
49747 buf.previous = NULL;
49748+ buf.file = file;
49749 buf.count = count;
49750 buf.error = 0;
49751
49752@@ -295,7 +314,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49753 error = buf.error;
49754 lastdirent = buf.previous;
49755 if (lastdirent) {
49756- typeof(lastdirent->d_off) d_off = file->f_pos;
49757+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
49758 if (__put_user(d_off, &lastdirent->d_off))
49759 error = -EFAULT;
49760 else
49761diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
49762index 2b7882b..1c5ef48 100644
49763--- a/fs/reiserfs/do_balan.c
49764+++ b/fs/reiserfs/do_balan.c
49765@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
49766 return;
49767 }
49768
49769- atomic_inc(&(fs_generation(tb->tb_sb)));
49770+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
49771 do_balance_starts(tb);
49772
49773 /* balance leaf returns 0 except if combining L R and S into
49774diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
49775index 2c1ade6..8c59d8d 100644
49776--- a/fs/reiserfs/procfs.c
49777+++ b/fs/reiserfs/procfs.c
49778@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
49779 "SMALL_TAILS " : "NO_TAILS ",
49780 replay_only(sb) ? "REPLAY_ONLY " : "",
49781 convert_reiserfs(sb) ? "CONV " : "",
49782- atomic_read(&r->s_generation_counter),
49783+ atomic_read_unchecked(&r->s_generation_counter),
49784 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
49785 SF(s_do_balance), SF(s_unneeded_left_neighbor),
49786 SF(s_good_search_by_key_reada), SF(s_bmaps),
49787diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
49788index 33215f5..c5d427a 100644
49789--- a/fs/reiserfs/reiserfs.h
49790+++ b/fs/reiserfs/reiserfs.h
49791@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
49792 /* Comment? -Hans */
49793 wait_queue_head_t s_wait;
49794 /* To be obsoleted soon by per buffer seals.. -Hans */
49795- atomic_t s_generation_counter; // increased by one every time the
49796+ atomic_unchecked_t s_generation_counter; // increased by one every time the
49797 // tree gets re-balanced
49798 unsigned long s_properties; /* File system properties. Currently holds
49799 on-disk FS format */
49800@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
49801 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
49802
49803 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
49804-#define get_generation(s) atomic_read (&fs_generation(s))
49805+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
49806 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
49807 #define __fs_changed(gen,s) (gen != get_generation (s))
49808 #define fs_changed(gen,s) \
49809diff --git a/fs/select.c b/fs/select.c
49810index db14c78..3aae1bd 100644
49811--- a/fs/select.c
49812+++ b/fs/select.c
49813@@ -20,6 +20,7 @@
49814 #include <linux/export.h>
49815 #include <linux/slab.h>
49816 #include <linux/poll.h>
49817+#include <linux/security.h>
49818 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
49819 #include <linux/file.h>
49820 #include <linux/fdtable.h>
49821@@ -831,6 +832,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
49822 struct poll_list *walk = head;
49823 unsigned long todo = nfds;
49824
49825+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
49826 if (nfds > rlimit(RLIMIT_NOFILE))
49827 return -EINVAL;
49828
49829diff --git a/fs/seq_file.c b/fs/seq_file.c
49830index 0cbd049..64e705c 100644
49831--- a/fs/seq_file.c
49832+++ b/fs/seq_file.c
49833@@ -9,6 +9,7 @@
49834 #include <linux/export.h>
49835 #include <linux/seq_file.h>
49836 #include <linux/slab.h>
49837+#include <linux/sched.h>
49838
49839 #include <asm/uaccess.h>
49840 #include <asm/page.h>
49841@@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
49842 memset(p, 0, sizeof(*p));
49843 mutex_init(&p->lock);
49844 p->op = op;
49845+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49846+ p->exec_id = current->exec_id;
49847+#endif
49848
49849 /*
49850 * Wrappers around seq_open(e.g. swaps_open) need to be
49851@@ -92,7 +96,7 @@ static int traverse(struct seq_file *m, loff_t offset)
49852 return 0;
49853 }
49854 if (!m->buf) {
49855- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49856+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
49857 if (!m->buf)
49858 return -ENOMEM;
49859 }
49860@@ -132,7 +136,7 @@ static int traverse(struct seq_file *m, loff_t offset)
49861 Eoverflow:
49862 m->op->stop(m, p);
49863 kfree(m->buf);
49864- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49865+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
49866 return !m->buf ? -ENOMEM : -EAGAIN;
49867 }
49868
49869@@ -187,7 +191,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
49870
49871 /* grab buffer if we didn't have one */
49872 if (!m->buf) {
49873- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49874+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
49875 if (!m->buf)
49876 goto Enomem;
49877 }
49878@@ -228,7 +232,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
49879 goto Fill;
49880 m->op->stop(m, p);
49881 kfree(m->buf);
49882- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49883+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
49884 if (!m->buf)
49885 goto Enomem;
49886 m->count = 0;
49887@@ -567,7 +571,7 @@ static void single_stop(struct seq_file *p, void *v)
49888 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49889 void *data)
49890 {
49891- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49892+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49893 int res = -ENOMEM;
49894
49895 if (op) {
49896diff --git a/fs/splice.c b/fs/splice.c
49897index 7bf08fa..eb35c2f 100644
49898--- a/fs/splice.c
49899+++ b/fs/splice.c
49900@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49901 pipe_lock(pipe);
49902
49903 for (;;) {
49904- if (!pipe->readers) {
49905+ if (!atomic_read(&pipe->readers)) {
49906 send_sig(SIGPIPE, current, 0);
49907 if (!ret)
49908 ret = -EPIPE;
49909@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49910 do_wakeup = 0;
49911 }
49912
49913- pipe->waiting_writers++;
49914+ atomic_inc(&pipe->waiting_writers);
49915 pipe_wait(pipe);
49916- pipe->waiting_writers--;
49917+ atomic_dec(&pipe->waiting_writers);
49918 }
49919
49920 pipe_unlock(pipe);
49921@@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
49922 old_fs = get_fs();
49923 set_fs(get_ds());
49924 /* The cast to a user pointer is valid due to the set_fs() */
49925- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
49926+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
49927 set_fs(old_fs);
49928
49929 return res;
49930@@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
49931 old_fs = get_fs();
49932 set_fs(get_ds());
49933 /* The cast to a user pointer is valid due to the set_fs() */
49934- res = vfs_write(file, (const char __user *)buf, count, &pos);
49935+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
49936 set_fs(old_fs);
49937
49938 return res;
49939@@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49940 goto err;
49941
49942 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49943- vec[i].iov_base = (void __user *) page_address(page);
49944+ vec[i].iov_base = (void __force_user *) page_address(page);
49945 vec[i].iov_len = this_len;
49946 spd.pages[i] = page;
49947 spd.nr_pages++;
49948@@ -849,10 +849,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
49949 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49950 {
49951 while (!pipe->nrbufs) {
49952- if (!pipe->writers)
49953+ if (!atomic_read(&pipe->writers))
49954 return 0;
49955
49956- if (!pipe->waiting_writers && sd->num_spliced)
49957+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49958 return 0;
49959
49960 if (sd->flags & SPLICE_F_NONBLOCK)
49961@@ -1187,7 +1187,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
49962 * out of the pipe right after the splice_to_pipe(). So set
49963 * PIPE_READERS appropriately.
49964 */
49965- pipe->readers = 1;
49966+ atomic_set(&pipe->readers, 1);
49967
49968 current->splice_pipe = pipe;
49969 }
49970@@ -1740,9 +1740,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49971 ret = -ERESTARTSYS;
49972 break;
49973 }
49974- if (!pipe->writers)
49975+ if (!atomic_read(&pipe->writers))
49976 break;
49977- if (!pipe->waiting_writers) {
49978+ if (!atomic_read(&pipe->waiting_writers)) {
49979 if (flags & SPLICE_F_NONBLOCK) {
49980 ret = -EAGAIN;
49981 break;
49982@@ -1774,7 +1774,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49983 pipe_lock(pipe);
49984
49985 while (pipe->nrbufs >= pipe->buffers) {
49986- if (!pipe->readers) {
49987+ if (!atomic_read(&pipe->readers)) {
49988 send_sig(SIGPIPE, current, 0);
49989 ret = -EPIPE;
49990 break;
49991@@ -1787,9 +1787,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49992 ret = -ERESTARTSYS;
49993 break;
49994 }
49995- pipe->waiting_writers++;
49996+ atomic_inc(&pipe->waiting_writers);
49997 pipe_wait(pipe);
49998- pipe->waiting_writers--;
49999+ atomic_dec(&pipe->waiting_writers);
50000 }
50001
50002 pipe_unlock(pipe);
50003@@ -1825,14 +1825,14 @@ retry:
50004 pipe_double_lock(ipipe, opipe);
50005
50006 do {
50007- if (!opipe->readers) {
50008+ if (!atomic_read(&opipe->readers)) {
50009 send_sig(SIGPIPE, current, 0);
50010 if (!ret)
50011 ret = -EPIPE;
50012 break;
50013 }
50014
50015- if (!ipipe->nrbufs && !ipipe->writers)
50016+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
50017 break;
50018
50019 /*
50020@@ -1929,7 +1929,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
50021 pipe_double_lock(ipipe, opipe);
50022
50023 do {
50024- if (!opipe->readers) {
50025+ if (!atomic_read(&opipe->readers)) {
50026 send_sig(SIGPIPE, current, 0);
50027 if (!ret)
50028 ret = -EPIPE;
50029@@ -1974,7 +1974,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
50030 * return EAGAIN if we have the potential of some data in the
50031 * future, otherwise just return 0
50032 */
50033- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
50034+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
50035 ret = -EAGAIN;
50036
50037 pipe_unlock(ipipe);
50038diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
50039index e6bb9b2..d8e3951 100644
50040--- a/fs/sysfs/dir.c
50041+++ b/fs/sysfs/dir.c
50042@@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
50043 struct sysfs_dirent *sd;
50044 int rc;
50045
50046+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
50047+ const char *parent_name = parent_sd->s_name;
50048+
50049+ mode = S_IFDIR | S_IRWXU;
50050+
50051+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
50052+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
50053+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
50054+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
50055+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
50056+#endif
50057+
50058 /* allocate */
50059 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
50060 if (!sd)
50061diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
50062index 00012e3..8392349 100644
50063--- a/fs/sysfs/file.c
50064+++ b/fs/sysfs/file.c
50065@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
50066
50067 struct sysfs_open_dirent {
50068 atomic_t refcnt;
50069- atomic_t event;
50070+ atomic_unchecked_t event;
50071 wait_queue_head_t poll;
50072 struct list_head buffers; /* goes through sysfs_buffer.list */
50073 };
50074@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
50075 if (!sysfs_get_active(attr_sd))
50076 return -ENODEV;
50077
50078- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
50079+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
50080 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
50081
50082 sysfs_put_active(attr_sd);
50083@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
50084 return -ENOMEM;
50085
50086 atomic_set(&new_od->refcnt, 0);
50087- atomic_set(&new_od->event, 1);
50088+ atomic_set_unchecked(&new_od->event, 1);
50089 init_waitqueue_head(&new_od->poll);
50090 INIT_LIST_HEAD(&new_od->buffers);
50091 goto retry;
50092@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
50093
50094 sysfs_put_active(attr_sd);
50095
50096- if (buffer->event != atomic_read(&od->event))
50097+ if (buffer->event != atomic_read_unchecked(&od->event))
50098 goto trigger;
50099
50100 return DEFAULT_POLLMASK;
50101@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
50102
50103 od = sd->s_attr.open;
50104 if (od) {
50105- atomic_inc(&od->event);
50106+ atomic_inc_unchecked(&od->event);
50107 wake_up_interruptible(&od->poll);
50108 }
50109
50110diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
50111index a7ac78f..02158e1 100644
50112--- a/fs/sysfs/symlink.c
50113+++ b/fs/sysfs/symlink.c
50114@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
50115
50116 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
50117 {
50118- char *page = nd_get_link(nd);
50119+ const char *page = nd_get_link(nd);
50120 if (!IS_ERR(page))
50121 free_page((unsigned long)page);
50122 }
50123diff --git a/fs/udf/misc.c b/fs/udf/misc.c
50124index c175b4d..8f36a16 100644
50125--- a/fs/udf/misc.c
50126+++ b/fs/udf/misc.c
50127@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
50128
50129 u8 udf_tag_checksum(const struct tag *t)
50130 {
50131- u8 *data = (u8 *)t;
50132+ const u8 *data = (const u8 *)t;
50133 u8 checksum = 0;
50134 int i;
50135 for (i = 0; i < sizeof(struct tag); ++i)
50136diff --git a/fs/udf/namei.c b/fs/udf/namei.c
50137index 1802417..c31deb3 100644
50138--- a/fs/udf/namei.c
50139+++ b/fs/udf/namei.c
50140@@ -1279,6 +1279,7 @@ static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
50141 *lenp = 3;
50142 fid->udf.block = location.logicalBlockNum;
50143 fid->udf.partref = location.partitionReferenceNum;
50144+ fid->udf.parent_partref = 0;
50145 fid->udf.generation = inode->i_generation;
50146
50147 if (parent) {
50148diff --git a/fs/utimes.c b/fs/utimes.c
50149index fa4dbe4..e12d1b9 100644
50150--- a/fs/utimes.c
50151+++ b/fs/utimes.c
50152@@ -1,6 +1,7 @@
50153 #include <linux/compiler.h>
50154 #include <linux/file.h>
50155 #include <linux/fs.h>
50156+#include <linux/security.h>
50157 #include <linux/linkage.h>
50158 #include <linux/mount.h>
50159 #include <linux/namei.h>
50160@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
50161 goto mnt_drop_write_and_out;
50162 }
50163 }
50164+
50165+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
50166+ error = -EACCES;
50167+ goto mnt_drop_write_and_out;
50168+ }
50169+
50170 mutex_lock(&inode->i_mutex);
50171 error = notify_change(path->dentry, &newattrs);
50172 mutex_unlock(&inode->i_mutex);
50173diff --git a/fs/xattr.c b/fs/xattr.c
50174index 1d7ac37..23cb9ec 100644
50175--- a/fs/xattr.c
50176+++ b/fs/xattr.c
50177@@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
50178 * Extended attribute SET operations
50179 */
50180 static long
50181-setxattr(struct dentry *d, const char __user *name, const void __user *value,
50182+setxattr(struct path *path, const char __user *name, const void __user *value,
50183 size_t size, int flags)
50184 {
50185 int error;
50186@@ -349,7 +349,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
50187 }
50188 }
50189
50190- error = vfs_setxattr(d, kname, kvalue, size, flags);
50191+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
50192+ error = -EACCES;
50193+ goto out;
50194+ }
50195+
50196+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
50197 out:
50198 if (vvalue)
50199 vfree(vvalue);
50200@@ -370,7 +375,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
50201 return error;
50202 error = mnt_want_write(path.mnt);
50203 if (!error) {
50204- error = setxattr(path.dentry, name, value, size, flags);
50205+ error = setxattr(&path, name, value, size, flags);
50206 mnt_drop_write(path.mnt);
50207 }
50208 path_put(&path);
50209@@ -389,7 +394,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
50210 return error;
50211 error = mnt_want_write(path.mnt);
50212 if (!error) {
50213- error = setxattr(path.dentry, name, value, size, flags);
50214+ error = setxattr(&path, name, value, size, flags);
50215 mnt_drop_write(path.mnt);
50216 }
50217 path_put(&path);
50218@@ -401,17 +406,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
50219 {
50220 int fput_needed;
50221 struct file *f;
50222- struct dentry *dentry;
50223 int error = -EBADF;
50224
50225 f = fget_light(fd, &fput_needed);
50226 if (!f)
50227 return error;
50228- dentry = f->f_path.dentry;
50229- audit_inode(NULL, dentry);
50230+ audit_inode(NULL, f->f_path.dentry);
50231 error = mnt_want_write_file(f);
50232 if (!error) {
50233- error = setxattr(dentry, name, value, size, flags);
50234+ error = setxattr(&f->f_path, name, value, size, flags);
50235 mnt_drop_write_file(f);
50236 }
50237 fput_light(f, fput_needed);
50238diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
50239index 69d06b0..c0996e5 100644
50240--- a/fs/xattr_acl.c
50241+++ b/fs/xattr_acl.c
50242@@ -17,8 +17,8 @@
50243 struct posix_acl *
50244 posix_acl_from_xattr(const void *value, size_t size)
50245 {
50246- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
50247- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
50248+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
50249+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
50250 int count;
50251 struct posix_acl *acl;
50252 struct posix_acl_entry *acl_e;
50253diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
50254index 58b815e..595ddee 100644
50255--- a/fs/xfs/xfs_bmap.c
50256+++ b/fs/xfs/xfs_bmap.c
50257@@ -189,7 +189,7 @@ xfs_bmap_validate_ret(
50258 int nmap,
50259 int ret_nmap);
50260 #else
50261-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
50262+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
50263 #endif /* DEBUG */
50264
50265 STATIC int
50266diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
50267index 19bf0c5..9f26b02 100644
50268--- a/fs/xfs/xfs_dir2_sf.c
50269+++ b/fs/xfs/xfs_dir2_sf.c
50270@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
50271 }
50272
50273 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
50274- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
50275+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
50276+ char name[sfep->namelen];
50277+ memcpy(name, sfep->name, sfep->namelen);
50278+ if (filldir(dirent, name, sfep->namelen,
50279+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
50280+ *offset = off & 0x7fffffff;
50281+ return 0;
50282+ }
50283+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
50284 off & 0x7fffffff, ino, DT_UNKNOWN)) {
50285 *offset = off & 0x7fffffff;
50286 return 0;
50287diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
50288index f9c3fe3..69cf4fc 100644
50289--- a/fs/xfs/xfs_discard.c
50290+++ b/fs/xfs/xfs_discard.c
50291@@ -179,12 +179,14 @@ xfs_ioc_trim(
50292 * used by the fstrim application. In the end it really doesn't
50293 * matter as trimming blocks is an advisory interface.
50294 */
50295+ if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
50296+ range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)))
50297+ return -XFS_ERROR(EINVAL);
50298+
50299 start = BTOBB(range.start);
50300 end = start + BTOBBT(range.len) - 1;
50301 minlen = BTOBB(max_t(u64, granularity, range.minlen));
50302
50303- if (XFS_BB_TO_FSB(mp, start) >= mp->m_sb.sb_dblocks)
50304- return -XFS_ERROR(EINVAL);
50305 if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)
50306 end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1;
50307
50308diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
50309index 3a05a41..320bec6 100644
50310--- a/fs/xfs/xfs_ioctl.c
50311+++ b/fs/xfs/xfs_ioctl.c
50312@@ -126,7 +126,7 @@ xfs_find_handle(
50313 }
50314
50315 error = -EFAULT;
50316- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
50317+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
50318 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
50319 goto out_put;
50320
50321diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
50322index 1a25fd8..e935581 100644
50323--- a/fs/xfs/xfs_iops.c
50324+++ b/fs/xfs/xfs_iops.c
50325@@ -394,7 +394,7 @@ xfs_vn_put_link(
50326 struct nameidata *nd,
50327 void *p)
50328 {
50329- char *s = nd_get_link(nd);
50330+ const char *s = nd_get_link(nd);
50331
50332 if (!IS_ERR(s))
50333 kfree(s);
50334diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
50335index 92d4331..ca28a4b 100644
50336--- a/fs/xfs/xfs_rtalloc.c
50337+++ b/fs/xfs/xfs_rtalloc.c
50338@@ -857,7 +857,7 @@ xfs_rtbuf_get(
50339 xfs_buf_t *bp; /* block buffer, result */
50340 xfs_inode_t *ip; /* bitmap or summary inode */
50341 xfs_bmbt_irec_t map;
50342- int nmap;
50343+ int nmap = 1;
50344 int error; /* error value */
50345
50346 ip = issum ? mp->m_rsumip : mp->m_rbmip;
50347diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
50348new file mode 100644
50349index 0000000..4d533f1
50350--- /dev/null
50351+++ b/grsecurity/Kconfig
50352@@ -0,0 +1,941 @@
50353+#
50354+# grsecurity configuration
50355+#
50356+menu "Memory Protections"
50357+depends on GRKERNSEC
50358+
50359+config GRKERNSEC_KMEM
50360+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
50361+ default y if GRKERNSEC_CONFIG_AUTO
50362+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50363+ help
50364+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50365+ be written to or read from to modify or leak the contents of the running
50366+ kernel. /dev/port will also not be allowed to be opened. If you have module
50367+ support disabled, enabling this will close up four ways that are
50368+ currently used to insert malicious code into the running kernel.
50369+ Even with all these features enabled, we still highly recommend that
50370+ you use the RBAC system, as it is still possible for an attacker to
50371+ modify the running kernel through privileged I/O granted by ioperm/iopl.
50372+ If you are not using XFree86, you may be able to stop this additional
50373+ case by enabling the 'Disable privileged I/O' option. Though nothing
50374+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50375+ but only to video memory, which is the only writing we allow in this
50376+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50377+ not be allowed to mprotect it with PROT_WRITE later.
50378+ It is highly recommended that you say Y here if you meet all the
50379+ conditions above.
50380+
50381+config GRKERNSEC_VM86
50382+ bool "Restrict VM86 mode"
50383+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
50384+ depends on X86_32
50385+
50386+ help
50387+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50388+ make use of a special execution mode on 32bit x86 processors called
50389+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50390+ video cards and will still work with this option enabled. The purpose
50391+ of the option is to prevent exploitation of emulation errors in
50392+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
50393+ Nearly all users should be able to enable this option.
50394+
50395+config GRKERNSEC_IO
50396+ bool "Disable privileged I/O"
50397+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
50398+ depends on X86
50399+ select RTC_CLASS
50400+ select RTC_INTF_DEV
50401+ select RTC_DRV_CMOS
50402+
50403+ help
50404+ If you say Y here, all ioperm and iopl calls will return an error.
50405+ Ioperm and iopl can be used to modify the running kernel.
50406+ Unfortunately, some programs need this access to operate properly,
50407+ the most notable of which are XFree86 and hwclock. hwclock can be
50408+ remedied by having RTC support in the kernel, so real-time
50409+ clock support is enabled if this option is enabled, to ensure
50410+ that hwclock operates correctly. XFree86 still will not
50411+ operate correctly with this option enabled, so DO NOT CHOOSE Y
50412+ IF YOU USE XFree86. If you use XFree86 and you still want to
50413+ protect your kernel against modification, use the RBAC system.
50414+
50415+config GRKERNSEC_PROC_MEMMAP
50416+ bool "Harden ASLR against information leaks and entropy reduction"
50417+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
50418+ depends on PAX_NOEXEC || PAX_ASLR
50419+ help
50420+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50421+ give no information about the addresses of its mappings if
50422+ PaX features that rely on random addresses are enabled on the task.
50423+ In addition to sanitizing this information and disabling other
50424+ dangerous sources of information, this option causes reads of sensitive
50425+ /proc/<pid> entries where the file descriptor was opened in a different
50426+ task than the one performing the read. Such attempts are logged.
50427+ This option also limits argv/env strings for suid/sgid binaries
50428+ to 512KB to prevent a complete exhaustion of the stack entropy provided
50429+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
50430+ binaries to prevent alternative mmap layouts from being abused.
50431+
50432+ If you use PaX it is essential that you say Y here as it closes up
50433+ several holes that make full ASLR useless locally.
50434+
50435+config GRKERNSEC_BRUTE
50436+ bool "Deter exploit bruteforcing"
50437+ default y if GRKERNSEC_CONFIG_AUTO
50438+ help
50439+ If you say Y here, attempts to bruteforce exploits against forking
50440+ daemons such as apache or sshd, as well as against suid/sgid binaries
50441+ will be deterred. When a child of a forking daemon is killed by PaX
50442+ or crashes due to an illegal instruction or other suspicious signal,
50443+ the parent process will be delayed 30 seconds upon every subsequent
50444+ fork until the administrator is able to assess the situation and
50445+ restart the daemon.
50446+ In the suid/sgid case, the attempt is logged, the user has all their
50447+ processes terminated, and they are prevented from executing any further
50448+ processes for 15 minutes.
50449+ It is recommended that you also enable signal logging in the auditing
50450+ section so that logs are generated when a process triggers a suspicious
50451+ signal.
50452+ If the sysctl option is enabled, a sysctl option with name
50453+ "deter_bruteforce" is created.
50454+
50455+
50456+config GRKERNSEC_MODHARDEN
50457+ bool "Harden module auto-loading"
50458+ default y if GRKERNSEC_CONFIG_AUTO
50459+ depends on MODULES
50460+ help
50461+ If you say Y here, module auto-loading in response to use of some
50462+ feature implemented by an unloaded module will be restricted to
50463+ root users. Enabling this option helps defend against attacks
50464+ by unprivileged users who abuse the auto-loading behavior to
50465+ cause a vulnerable module to load that is then exploited.
50466+
50467+ If this option prevents a legitimate use of auto-loading for a
50468+ non-root user, the administrator can execute modprobe manually
50469+ with the exact name of the module mentioned in the alert log.
50470+ Alternatively, the administrator can add the module to the list
50471+ of modules loaded at boot by modifying init scripts.
50472+
50473+ Modification of init scripts will most likely be needed on
50474+ Ubuntu servers with encrypted home directory support enabled,
50475+ as the first non-root user logging in will cause the ecb(aes),
50476+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50477+
50478+config GRKERNSEC_HIDESYM
50479+ bool "Hide kernel symbols"
50480+ default y if GRKERNSEC_CONFIG_AUTO
50481+ select PAX_USERCOPY_SLABS
50482+ help
50483+ If you say Y here, getting information on loaded modules, and
50484+ displaying all kernel symbols through a syscall will be restricted
50485+ to users with CAP_SYS_MODULE. For software compatibility reasons,
50486+ /proc/kallsyms will be restricted to the root user. The RBAC
50487+ system can hide that entry even from root.
50488+
50489+ This option also prevents leaking of kernel addresses through
50490+ several /proc entries.
50491+
50492+ Note that this option is only effective provided the following
50493+ conditions are met:
50494+ 1) The kernel using grsecurity is not precompiled by some distribution
50495+ 2) You have also enabled GRKERNSEC_DMESG
50496+ 3) You are using the RBAC system and hiding other files such as your
50497+ kernel image and System.map. Alternatively, enabling this option
50498+ causes the permissions on /boot, /lib/modules, and the kernel
50499+ source directory to change at compile time to prevent
50500+ reading by non-root users.
50501+ If the above conditions are met, this option will aid in providing a
50502+ useful protection against local kernel exploitation of overflows
50503+ and arbitrary read/write vulnerabilities.
50504+
50505+config GRKERNSEC_KERN_LOCKOUT
50506+ bool "Active kernel exploit response"
50507+ default y if GRKERNSEC_CONFIG_AUTO
50508+ depends on X86 || ARM || PPC || SPARC
50509+ help
50510+ If you say Y here, when a PaX alert is triggered due to suspicious
50511+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50512+ or an OOPS occurs due to bad memory accesses, instead of just
50513+ terminating the offending process (and potentially allowing
50514+ a subsequent exploit from the same user), we will take one of two
50515+ actions:
50516+ If the user was root, we will panic the system
50517+ If the user was non-root, we will log the attempt, terminate
50518+ all processes owned by the user, then prevent them from creating
50519+ any new processes until the system is restarted
50520+ This deters repeated kernel exploitation/bruteforcing attempts
50521+ and is useful for later forensics.
50522+
50523+endmenu
50524+menu "Role Based Access Control Options"
50525+depends on GRKERNSEC
50526+
50527+config GRKERNSEC_RBAC_DEBUG
50528+ bool
50529+
50530+config GRKERNSEC_NO_RBAC
50531+ bool "Disable RBAC system"
50532+ help
50533+ If you say Y here, the /dev/grsec device will be removed from the kernel,
50534+ preventing the RBAC system from being enabled. You should only say Y
50535+ here if you have no intention of using the RBAC system, so as to prevent
50536+ an attacker with root access from misusing the RBAC system to hide files
50537+ and processes when loadable module support and /dev/[k]mem have been
50538+ locked down.
50539+
50540+config GRKERNSEC_ACL_HIDEKERN
50541+ bool "Hide kernel processes"
50542+ help
50543+ If you say Y here, all kernel threads will be hidden to all
50544+ processes but those whose subject has the "view hidden processes"
50545+ flag.
50546+
50547+config GRKERNSEC_ACL_MAXTRIES
50548+ int "Maximum tries before password lockout"
50549+ default 3
50550+ help
50551+ This option enforces the maximum number of times a user can attempt
50552+ to authorize themselves with the grsecurity RBAC system before being
50553+ denied the ability to attempt authorization again for a specified time.
50554+ The lower the number, the harder it will be to brute-force a password.
50555+
50556+config GRKERNSEC_ACL_TIMEOUT
50557+ int "Time to wait after max password tries, in seconds"
50558+ default 30
50559+ help
50560+ This option specifies the time the user must wait after attempting to
50561+ authorize to the RBAC system with the maximum number of invalid
50562+ passwords. The higher the number, the harder it will be to brute-force
50563+ a password.
50564+
50565+endmenu
50566+menu "Filesystem Protections"
50567+depends on GRKERNSEC
50568+
50569+config GRKERNSEC_PROC
50570+ bool "Proc restrictions"
50571+ default y if GRKERNSEC_CONFIG_AUTO
50572+ help
50573+ If you say Y here, the permissions of the /proc filesystem
50574+ will be altered to enhance system security and privacy. You MUST
50575+ choose either a user only restriction or a user and group restriction.
50576+ Depending upon the option you choose, you can either restrict users to
50577+ see only the processes they themselves run, or choose a group that can
50578+ view all processes and files normally restricted to root if you choose
50579+ the "restrict to user only" option. NOTE: If you're running identd or
50580+ ntpd as a non-root user, you will have to run it as the group you
50581+ specify here.
50582+
50583+config GRKERNSEC_PROC_USER
50584+ bool "Restrict /proc to user only"
50585+ depends on GRKERNSEC_PROC
50586+ help
50587+ If you say Y here, non-root users will only be able to view their own
50588+ processes, and restricts them from viewing network-related information,
50589+ and viewing kernel symbol and module information.
50590+
50591+config GRKERNSEC_PROC_USERGROUP
50592+ bool "Allow special group"
50593+ default y if GRKERNSEC_CONFIG_AUTO
50594+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50595+ help
50596+ If you say Y here, you will be able to select a group that will be
50597+ able to view all processes and network-related information. If you've
50598+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50599+ remain hidden. This option is useful if you want to run identd as
50600+ a non-root user.
50601+
50602+config GRKERNSEC_PROC_GID
50603+ int "GID for special group"
50604+ depends on GRKERNSEC_PROC_USERGROUP
50605+ default 1001
50606+
50607+config GRKERNSEC_PROC_ADD
50608+ bool "Additional restrictions"
50609+ default y if GRKERNSEC_CONFIG_AUTO
50610+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50611+ help
50612+ If you say Y here, additional restrictions will be placed on
50613+ /proc that keep normal users from viewing device information and
50614+ slabinfo information that could be useful for exploits.
50615+
50616+config GRKERNSEC_LINK
50617+ bool "Linking restrictions"
50618+ default y if GRKERNSEC_CONFIG_AUTO
50619+ help
50620+ If you say Y here, /tmp race exploits will be prevented, since users
50621+ will no longer be able to follow symlinks owned by other users in
50622+ world-writable +t directories (e.g. /tmp), unless the owner of the
50623+ symlink is the owner of the directory. Users will also not be
50624+ able to hardlink to files they do not own. If the sysctl option is
50625+ enabled, a sysctl option with name "linking_restrictions" is created.
50626+
50627+config GRKERNSEC_SYMLINKOWN
50628+ bool "Kernel-enforced SymlinksIfOwnerMatch"
50629+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
50630+ help
50631+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
50632+ that prevents it from being used as a security feature. As Apache
50633+ verifies the symlink by performing a stat() against the target of
50634+ the symlink before it is followed, an attacker can setup a symlink
50635+ to point to a same-owned file, then replace the symlink with one
50636+ that targets another user's file just after Apache "validates" the
50637+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
50638+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
50639+ will be in place for the group you specify. If the sysctl option
50640+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
50641+ created.
50642+
50643+config GRKERNSEC_SYMLINKOWN_GID
50644+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
50645+ depends on GRKERNSEC_SYMLINKOWN
50646+ default 1006
50647+ help
50648+ Setting this GID determines what group kernel-enforced
50649+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
50650+ is enabled, a sysctl option with name "symlinkown_gid" is created.
50651+
50652+config GRKERNSEC_FIFO
50653+ bool "FIFO restrictions"
50654+ default y if GRKERNSEC_CONFIG_AUTO
50655+ help
50656+ If you say Y here, users will not be able to write to FIFOs they don't
50657+ own in world-writable +t directories (e.g. /tmp), unless the owner of
50658+ the FIFO is the same owner of the directory it's held in. If the sysctl
50659+ option is enabled, a sysctl option with name "fifo_restrictions" is
50660+ created.
50661+
50662+config GRKERNSEC_SYSFS_RESTRICT
50663+ bool "Sysfs/debugfs restriction"
50664+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
50665+ depends on SYSFS
50666+ help
50667+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50668+ any filesystem normally mounted under it (e.g. debugfs) will be
50669+ mostly accessible only by root. These filesystems generally provide access
50670+ to hardware and debug information that isn't appropriate for unprivileged
50671+ users of the system. Sysfs and debugfs have also become a large source
50672+ of new vulnerabilities, ranging from infoleaks to local compromise.
50673+ There has been very little oversight with an eye toward security involved
50674+ in adding new exporters of information to these filesystems, so their
50675+ use is discouraged.
50676+ For reasons of compatibility, a few directories have been whitelisted
50677+ for access by non-root users:
50678+ /sys/fs/selinux
50679+ /sys/fs/fuse
50680+ /sys/devices/system/cpu
50681+
50682+config GRKERNSEC_ROFS
50683+ bool "Runtime read-only mount protection"
50684+ help
50685+ If you say Y here, a sysctl option with name "romount_protect" will
50686+ be created. By setting this option to 1 at runtime, filesystems
50687+ will be protected in the following ways:
50688+ * No new writable mounts will be allowed
50689+ * Existing read-only mounts won't be able to be remounted read/write
50690+ * Write operations will be denied on all block devices
50691+ This option acts independently of grsec_lock: once it is set to 1,
50692+ it cannot be turned off. Therefore, please be mindful of the resulting
50693+ behavior if this option is enabled in an init script on a read-only
50694+ filesystem. This feature is mainly intended for secure embedded systems.
50695+
50696+config GRKERNSEC_CHROOT
50697+ bool "Chroot jail restrictions"
50698+ default y if GRKERNSEC_CONFIG_AUTO
50699+ help
50700+ If you say Y here, you will be able to choose several options that will
50701+ make breaking out of a chrooted jail much more difficult. If you
50702+ encounter no software incompatibilities with the following options, it
50703+ is recommended that you enable each one.
50704+
50705+config GRKERNSEC_CHROOT_MOUNT
50706+ bool "Deny mounts"
50707+ default y if GRKERNSEC_CONFIG_AUTO
50708+ depends on GRKERNSEC_CHROOT
50709+ help
50710+ If you say Y here, processes inside a chroot will not be able to
50711+ mount or remount filesystems. If the sysctl option is enabled, a
50712+ sysctl option with name "chroot_deny_mount" is created.
50713+
50714+config GRKERNSEC_CHROOT_DOUBLE
50715+ bool "Deny double-chroots"
50716+ default y if GRKERNSEC_CONFIG_AUTO
50717+ depends on GRKERNSEC_CHROOT
50718+ help
50719+ If you say Y here, processes inside a chroot will not be able to chroot
50720+ again outside the chroot. This is a widely used method of breaking
50721+ out of a chroot jail and should not be allowed. If the sysctl
50722+ option is enabled, a sysctl option with name
50723+ "chroot_deny_chroot" is created.
50724+
50725+config GRKERNSEC_CHROOT_PIVOT
50726+ bool "Deny pivot_root in chroot"
50727+ default y if GRKERNSEC_CONFIG_AUTO
50728+ depends on GRKERNSEC_CHROOT
50729+ help
50730+ If you say Y here, processes inside a chroot will not be able to use
50731+ a function called pivot_root() that was introduced in Linux 2.3.41. It
50732+ works similar to chroot in that it changes the root filesystem. This
50733+ function could be misused in a chrooted process to attempt to break out
50734+ of the chroot, and therefore should not be allowed. If the sysctl
50735+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
50736+ created.
50737+
50738+config GRKERNSEC_CHROOT_CHDIR
50739+ bool "Enforce chdir(\"/\") on all chroots"
50740+ default y if GRKERNSEC_CONFIG_AUTO
50741+ depends on GRKERNSEC_CHROOT
50742+ help
50743+ If you say Y here, the current working directory of all newly-chrooted
50744+ applications will be set to the root directory of the chroot.
50745+ The man page on chroot(2) states:
50746+ Note that this call does not change the current working
50747+ directory, so that `.' can be outside the tree rooted at
50748+ `/'. In particular, the super-user can escape from a
50749+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50750+
50751+ It is recommended that you say Y here, since it's not known to break
50752+ any software. If the sysctl option is enabled, a sysctl option with
50753+ name "chroot_enforce_chdir" is created.
50754+
50755+config GRKERNSEC_CHROOT_CHMOD
50756+ bool "Deny (f)chmod +s"
50757+ default y if GRKERNSEC_CONFIG_AUTO
50758+ depends on GRKERNSEC_CHROOT
50759+ help
50760+ If you say Y here, processes inside a chroot will not be able to chmod
50761+ or fchmod files to make them have suid or sgid bits. This protects
50762+ against another published method of breaking a chroot. If the sysctl
50763+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
50764+ created.
50765+
50766+config GRKERNSEC_CHROOT_FCHDIR
50767+ bool "Deny fchdir out of chroot"
50768+ default y if GRKERNSEC_CONFIG_AUTO
50769+ depends on GRKERNSEC_CHROOT
50770+ help
50771+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
50772+ to a file descriptor of the chrooting process that points to a directory
50773+ outside the filesystem will be stopped. If the sysctl option
50774+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50775+
50776+config GRKERNSEC_CHROOT_MKNOD
50777+ bool "Deny mknod"
50778+ default y if GRKERNSEC_CONFIG_AUTO
50779+ depends on GRKERNSEC_CHROOT
50780+ help
50781+ If you say Y here, processes inside a chroot will not be allowed to
50782+ mknod. The problem with using mknod inside a chroot is that it
50783+ would allow an attacker to create a device entry that is the same
50784+ as one on the physical root of your system, which could range from
50785+ anything from the console device to a device for your harddrive (which
50786+ they could then use to wipe the drive or steal data). It is recommended
50787+ that you say Y here, unless you run into software incompatibilities.
50788+ If the sysctl option is enabled, a sysctl option with name
50789+ "chroot_deny_mknod" is created.
50790+
50791+config GRKERNSEC_CHROOT_SHMAT
50792+ bool "Deny shmat() out of chroot"
50793+ default y if GRKERNSEC_CONFIG_AUTO
50794+ depends on GRKERNSEC_CHROOT
50795+ help
50796+ If you say Y here, processes inside a chroot will not be able to attach
50797+ to shared memory segments that were created outside of the chroot jail.
50798+ It is recommended that you say Y here. If the sysctl option is enabled,
50799+ a sysctl option with name "chroot_deny_shmat" is created.
50800+
50801+config GRKERNSEC_CHROOT_UNIX
50802+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
50803+ default y if GRKERNSEC_CONFIG_AUTO
50804+ depends on GRKERNSEC_CHROOT
50805+ help
50806+ If you say Y here, processes inside a chroot will not be able to
50807+ connect to abstract (meaning not belonging to a filesystem) Unix
50808+ domain sockets that were bound outside of a chroot. It is recommended
50809+ that you say Y here. If the sysctl option is enabled, a sysctl option
50810+ with name "chroot_deny_unix" is created.
50811+
50812+config GRKERNSEC_CHROOT_FINDTASK
50813+ bool "Protect outside processes"
50814+ default y if GRKERNSEC_CONFIG_AUTO
50815+ depends on GRKERNSEC_CHROOT
50816+ help
50817+ If you say Y here, processes inside a chroot will not be able to
50818+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50819+ getsid, or view any process outside of the chroot. If the sysctl
50820+ option is enabled, a sysctl option with name "chroot_findtask" is
50821+ created.
50822+
50823+config GRKERNSEC_CHROOT_NICE
50824+ bool "Restrict priority changes"
50825+ default y if GRKERNSEC_CONFIG_AUTO
50826+ depends on GRKERNSEC_CHROOT
50827+ help
50828+ If you say Y here, processes inside a chroot will not be able to raise
50829+ the priority of processes in the chroot, or alter the priority of
50830+ processes outside the chroot. This provides more security than simply
50831+ removing CAP_SYS_NICE from the process' capability set. If the
50832+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50833+ is created.
50834+
50835+config GRKERNSEC_CHROOT_SYSCTL
50836+ bool "Deny sysctl writes"
50837+ default y if GRKERNSEC_CONFIG_AUTO
50838+ depends on GRKERNSEC_CHROOT
50839+ help
50840+ If you say Y here, an attacker in a chroot will not be able to
50841+ write to sysctl entries, either by sysctl(2) or through a /proc
50842+ interface. It is strongly recommended that you say Y here. If the
50843+ sysctl option is enabled, a sysctl option with name
50844+ "chroot_deny_sysctl" is created.
50845+
50846+config GRKERNSEC_CHROOT_CAPS
50847+ bool "Capability restrictions"
50848+ default y if GRKERNSEC_CONFIG_AUTO
50849+ depends on GRKERNSEC_CHROOT
50850+ help
50851+ If you say Y here, the capabilities on all processes within a
50852+ chroot jail will be lowered to stop module insertion, raw i/o,
50853+ system and net admin tasks, rebooting the system, modifying immutable
50854+ files, modifying IPC owned by another, and changing the system time.
50855+ This is left an option because it can break some apps. Disable this
50856+ if your chrooted apps are having problems performing those kinds of
50857+ tasks. If the sysctl option is enabled, a sysctl option with
50858+ name "chroot_caps" is created.
50859+
50860+endmenu
50861+menu "Kernel Auditing"
50862+depends on GRKERNSEC
50863+
50864+config GRKERNSEC_AUDIT_GROUP
50865+ bool "Single group for auditing"
50866+ help
50867+ If you say Y here, the exec, chdir, and (un)mount logging features
50868+ will only operate on a group you specify. This option is recommended
50869+ if you only want to watch certain users instead of having a large
50870+ amount of logs from the entire system. If the sysctl option is enabled,
50871+ a sysctl option with name "audit_group" is created.
50872+
50873+config GRKERNSEC_AUDIT_GID
50874+ int "GID for auditing"
50875+ depends on GRKERNSEC_AUDIT_GROUP
50876+ default 1007
50877+
50878+config GRKERNSEC_EXECLOG
50879+ bool "Exec logging"
50880+ help
50881+ If you say Y here, all execve() calls will be logged (since the
50882+ other exec*() calls are frontends to execve(), all execution
50883+ will be logged). Useful for shell-servers that like to keep track
50884+ of their users. If the sysctl option is enabled, a sysctl option with
50885+ name "exec_logging" is created.
50886+ WARNING: This option when enabled will produce a LOT of logs, especially
50887+ on an active system.
50888+
50889+config GRKERNSEC_RESLOG
50890+ bool "Resource logging"
50891+ default y if GRKERNSEC_CONFIG_AUTO
50892+ help
50893+ If you say Y here, all attempts to overstep resource limits will
50894+ be logged with the resource name, the requested size, and the current
50895+ limit. It is highly recommended that you say Y here. If the sysctl
50896+ option is enabled, a sysctl option with name "resource_logging" is
50897+ created. If the RBAC system is enabled, the sysctl value is ignored.
50898+
50899+config GRKERNSEC_CHROOT_EXECLOG
50900+ bool "Log execs within chroot"
50901+ help
50902+ If you say Y here, all executions inside a chroot jail will be logged
50903+ to syslog. This can cause a large amount of logs if certain
50904+ applications (eg. djb's daemontools) are installed on the system, and
50905+ is therefore left as an option. If the sysctl option is enabled, a
50906+ sysctl option with name "chroot_execlog" is created.
50907+
50908+config GRKERNSEC_AUDIT_PTRACE
50909+ bool "Ptrace logging"
50910+ help
50911+ If you say Y here, all attempts to attach to a process via ptrace
50912+ will be logged. If the sysctl option is enabled, a sysctl option
50913+ with name "audit_ptrace" is created.
50914+
50915+config GRKERNSEC_AUDIT_CHDIR
50916+ bool "Chdir logging"
50917+ help
50918+ If you say Y here, all chdir() calls will be logged. If the sysctl
50919+ option is enabled, a sysctl option with name "audit_chdir" is created.
50920+
50921+config GRKERNSEC_AUDIT_MOUNT
50922+ bool "(Un)Mount logging"
50923+ help
50924+ If you say Y here, all mounts and unmounts will be logged. If the
50925+ sysctl option is enabled, a sysctl option with name "audit_mount" is
50926+ created.
50927+
50928+config GRKERNSEC_SIGNAL
50929+ bool "Signal logging"
50930+ default y if GRKERNSEC_CONFIG_AUTO
50931+ help
50932+ If you say Y here, certain important signals will be logged, such as
50933+ SIGSEGV, which will as a result inform you of when a error in a program
50934+ occurred, which in some cases could mean a possible exploit attempt.
50935+ If the sysctl option is enabled, a sysctl option with name
50936+ "signal_logging" is created.
50937+
50938+config GRKERNSEC_FORKFAIL
50939+ bool "Fork failure logging"
50940+ help
50941+ If you say Y here, all failed fork() attempts will be logged.
50942+ This could suggest a fork bomb, or someone attempting to overstep
50943+ their process limit. If the sysctl option is enabled, a sysctl option
50944+ with name "forkfail_logging" is created.
50945+
50946+config GRKERNSEC_TIME
50947+ bool "Time change logging"
50948+ default y if GRKERNSEC_CONFIG_AUTO
50949+ help
50950+ If you say Y here, any changes of the system clock will be logged.
50951+ If the sysctl option is enabled, a sysctl option with name
50952+ "timechange_logging" is created.
50953+
50954+config GRKERNSEC_PROC_IPADDR
50955+ bool "/proc/<pid>/ipaddr support"
50956+ default y if GRKERNSEC_CONFIG_AUTO
50957+ help
50958+ If you say Y here, a new entry will be added to each /proc/<pid>
50959+ directory that contains the IP address of the person using the task.
50960+ The IP is carried across local TCP and AF_UNIX stream sockets.
50961+ This information can be useful for IDS/IPSes to perform remote response
50962+ to a local attack. The entry is readable by only the owner of the
50963+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50964+ the RBAC system), and thus does not create privacy concerns.
50965+
50966+config GRKERNSEC_RWXMAP_LOG
50967+ bool 'Denied RWX mmap/mprotect logging'
50968+ default y if GRKERNSEC_CONFIG_AUTO
50969+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50970+ help
50971+ If you say Y here, calls to mmap() and mprotect() with explicit
50972+ usage of PROT_WRITE and PROT_EXEC together will be logged when
50973+ denied by the PAX_MPROTECT feature. If the sysctl option is
50974+ enabled, a sysctl option with name "rwxmap_logging" is created.
50975+
50976+config GRKERNSEC_AUDIT_TEXTREL
50977+ bool 'ELF text relocations logging (READ HELP)'
50978+ depends on PAX_MPROTECT
50979+ help
50980+ If you say Y here, text relocations will be logged with the filename
50981+ of the offending library or binary. The purpose of the feature is
50982+ to help Linux distribution developers get rid of libraries and
50983+ binaries that need text relocations which hinder the future progress
50984+ of PaX. Only Linux distribution developers should say Y here, and
50985+ never on a production machine, as this option creates an information
50986+ leak that could aid an attacker in defeating the randomization of
50987+ a single memory region. If the sysctl option is enabled, a sysctl
50988+ option with name "audit_textrel" is created.
50989+
50990+endmenu
50991+
50992+menu "Executable Protections"
50993+depends on GRKERNSEC
50994+
50995+config GRKERNSEC_DMESG
50996+ bool "Dmesg(8) restriction"
50997+ default y if GRKERNSEC_CONFIG_AUTO
50998+ help
50999+ If you say Y here, non-root users will not be able to use dmesg(8)
51000+ to view up to the last 4kb of messages in the kernel's log buffer.
51001+ The kernel's log buffer often contains kernel addresses and other
51002+ identifying information useful to an attacker in fingerprinting a
51003+ system for a targeted exploit.
51004+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
51005+ created.
51006+
51007+config GRKERNSEC_HARDEN_PTRACE
51008+ bool "Deter ptrace-based process snooping"
51009+ default y if GRKERNSEC_CONFIG_AUTO
51010+ help
51011+ If you say Y here, TTY sniffers and other malicious monitoring
51012+ programs implemented through ptrace will be defeated. If you
51013+ have been using the RBAC system, this option has already been
51014+ enabled for several years for all users, with the ability to make
51015+ fine-grained exceptions.
51016+
51017+ This option only affects the ability of non-root users to ptrace
51018+ processes that are not a descendent of the ptracing process.
51019+ This means that strace ./binary and gdb ./binary will still work,
51020+ but attaching to arbitrary processes will not. If the sysctl
51021+ option is enabled, a sysctl option with name "harden_ptrace" is
51022+ created.
51023+
51024+config GRKERNSEC_PTRACE_READEXEC
51025+ bool "Require read access to ptrace sensitive binaries"
51026+ default y if GRKERNSEC_CONFIG_AUTO
51027+ help
51028+ If you say Y here, unprivileged users will not be able to ptrace unreadable
51029+ binaries. This option is useful in environments that
51030+ remove the read bits (e.g. file mode 4711) from suid binaries to
51031+ prevent infoleaking of their contents. This option adds
51032+ consistency to the use of that file mode, as the binary could normally
51033+ be read out when run without privileges while ptracing.
51034+
51035+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
51036+ is created.
51037+
51038+config GRKERNSEC_SETXID
51039+ bool "Enforce consistent multithreaded privileges"
51040+ default y if GRKERNSEC_CONFIG_AUTO
51041+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
51042+ help
51043+ If you say Y here, a change from a root uid to a non-root uid
51044+ in a multithreaded application will cause the resulting uids,
51045+ gids, supplementary groups, and capabilities in that thread
51046+ to be propagated to the other threads of the process. In most
51047+ cases this is unnecessary, as glibc will emulate this behavior
51048+ on behalf of the application. Other libcs do not act in the
51049+ same way, allowing the other threads of the process to continue
51050+ running with root privileges. If the sysctl option is enabled,
51051+ a sysctl option with name "consistent_setxid" is created.
51052+
51053+config GRKERNSEC_TPE
51054+ bool "Trusted Path Execution (TPE)"
51055+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
51056+ help
51057+ If you say Y here, you will be able to choose a gid to add to the
51058+ supplementary groups of users you want to mark as "untrusted."
51059+ These users will not be able to execute any files that are not in
51060+ root-owned directories writable only by root. If the sysctl option
51061+ is enabled, a sysctl option with name "tpe" is created.
51062+
51063+config GRKERNSEC_TPE_ALL
51064+ bool "Partially restrict all non-root users"
51065+ depends on GRKERNSEC_TPE
51066+ help
51067+ If you say Y here, all non-root users will be covered under
51068+ a weaker TPE restriction. This is separate from, and in addition to,
51069+ the main TPE options that you have selected elsewhere. Thus, if a
51070+ "trusted" GID is chosen, this restriction applies to even that GID.
51071+ Under this restriction, all non-root users will only be allowed to
51072+ execute files in directories they own that are not group or
51073+ world-writable, or in directories owned by root and writable only by
51074+ root. If the sysctl option is enabled, a sysctl option with name
51075+ "tpe_restrict_all" is created.
51076+
51077+config GRKERNSEC_TPE_INVERT
51078+ bool "Invert GID option"
51079+ depends on GRKERNSEC_TPE
51080+ help
51081+ If you say Y here, the group you specify in the TPE configuration will
51082+ decide what group TPE restrictions will be *disabled* for. This
51083+ option is useful if you want TPE restrictions to be applied to most
51084+ users on the system. If the sysctl option is enabled, a sysctl option
51085+ with name "tpe_invert" is created. Unlike other sysctl options, this
51086+ entry will default to on for backward-compatibility.
51087+
51088+config GRKERNSEC_TPE_GID
51089+ int "GID for untrusted users"
51090+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
51091+ default 1005
51092+ help
51093+ Setting this GID determines what group TPE restrictions will be
51094+ *enabled* for. If the sysctl option is enabled, a sysctl option
51095+ with name "tpe_gid" is created.
51096+
51097+config GRKERNSEC_TPE_GID
51098+ int "GID for trusted users"
51099+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
51100+ default 1005
51101+ help
51102+ Setting this GID determines what group TPE restrictions will be
51103+ *disabled* for. If the sysctl option is enabled, a sysctl option
51104+ with name "tpe_gid" is created.
51105+
51106+endmenu
51107+menu "Network Protections"
51108+depends on GRKERNSEC
51109+
51110+config GRKERNSEC_RANDNET
51111+ bool "Larger entropy pools"
51112+ default y if GRKERNSEC_CONFIG_AUTO
51113+ help
51114+ If you say Y here, the entropy pools used for many features of Linux
51115+ and grsecurity will be doubled in size. Since several grsecurity
51116+ features use additional randomness, it is recommended that you say Y
51117+ here. Saying Y here has a similar effect as modifying
51118+ /proc/sys/kernel/random/poolsize.
51119+
51120+config GRKERNSEC_BLACKHOLE
51121+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
51122+ default y if GRKERNSEC_CONFIG_AUTO
51123+ depends on NET
51124+ help
51125+ If you say Y here, neither TCP resets nor ICMP
51126+ destination-unreachable packets will be sent in response to packets
51127+ sent to ports for which no associated listening process exists.
51128+ This feature supports both IPV4 and IPV6 and exempts the
51129+ loopback interface from blackholing. Enabling this feature
51130+ makes a host more resilient to DoS attacks and reduces network
51131+ visibility against scanners.
51132+
51133+ The blackhole feature as-implemented is equivalent to the FreeBSD
51134+ blackhole feature, as it prevents RST responses to all packets, not
51135+ just SYNs. Under most application behavior this causes no
51136+ problems, but applications (like haproxy) may not close certain
51137+ connections in a way that cleanly terminates them on the remote
51138+ end, leaving the remote host in LAST_ACK state. Because of this
51139+ side-effect and to prevent intentional LAST_ACK DoSes, this
51140+ feature also adds automatic mitigation against such attacks.
51141+ The mitigation drastically reduces the amount of time a socket
51142+ can spend in LAST_ACK state. If you're using haproxy and not
51143+ all servers it connects to have this option enabled, consider
51144+ disabling this feature on the haproxy host.
51145+
51146+ If the sysctl option is enabled, two sysctl options with names
51147+ "ip_blackhole" and "lastack_retries" will be created.
51148+ While "ip_blackhole" takes the standard zero/non-zero on/off
51149+ toggle, "lastack_retries" uses the same kinds of values as
51150+ "tcp_retries1" and "tcp_retries2". The default value of 4
51151+ prevents a socket from lasting more than 45 seconds in LAST_ACK
51152+ state.
51153+
51154+config GRKERNSEC_SOCKET
51155+ bool "Socket restrictions"
51156+ depends on NET
51157+ help
51158+ If you say Y here, you will be able to choose from several options.
51159+ If you assign a GID on your system and add it to the supplementary
51160+ groups of users you want to restrict socket access to, this patch
51161+ will perform up to three things, based on the option(s) you choose.
51162+
51163+config GRKERNSEC_SOCKET_ALL
51164+ bool "Deny any sockets to group"
51165+ depends on GRKERNSEC_SOCKET
51166+ help
51167+ If you say Y here, you will be able to choose a GID of whose users will
51168+ be unable to connect to other hosts from your machine or run server
51169+ applications from your machine. If the sysctl option is enabled, a
51170+ sysctl option with name "socket_all" is created.
51171+
51172+config GRKERNSEC_SOCKET_ALL_GID
51173+ int "GID to deny all sockets for"
51174+ depends on GRKERNSEC_SOCKET_ALL
51175+ default 1004
51176+ help
51177+ Here you can choose the GID to disable socket access for. Remember to
51178+ add the users you want socket access disabled for to the GID
51179+ specified here. If the sysctl option is enabled, a sysctl option
51180+ with name "socket_all_gid" is created.
51181+
51182+config GRKERNSEC_SOCKET_CLIENT
51183+ bool "Deny client sockets to group"
51184+ depends on GRKERNSEC_SOCKET
51185+ help
51186+ If you say Y here, you will be able to choose a GID of whose users will
51187+ be unable to connect to other hosts from your machine, but will be
51188+ able to run servers. If this option is enabled, all users in the group
51189+ you specify will have to use passive mode when initiating ftp transfers
51190+ from the shell on your machine. If the sysctl option is enabled, a
51191+ sysctl option with name "socket_client" is created.
51192+
51193+config GRKERNSEC_SOCKET_CLIENT_GID
51194+ int "GID to deny client sockets for"
51195+ depends on GRKERNSEC_SOCKET_CLIENT
51196+ default 1003
51197+ help
51198+ Here you can choose the GID to disable client socket access for.
51199+ Remember to add the users you want client socket access disabled for to
51200+ the GID specified here. If the sysctl option is enabled, a sysctl
51201+ option with name "socket_client_gid" is created.
51202+
51203+config GRKERNSEC_SOCKET_SERVER
51204+ bool "Deny server sockets to group"
51205+ depends on GRKERNSEC_SOCKET
51206+ help
51207+ If you say Y here, you will be able to choose a GID of whose users will
51208+ be unable to run server applications from your machine. If the sysctl
51209+ option is enabled, a sysctl option with name "socket_server" is created.
51210+
51211+config GRKERNSEC_SOCKET_SERVER_GID
51212+ int "GID to deny server sockets for"
51213+ depends on GRKERNSEC_SOCKET_SERVER
51214+ default 1002
51215+ help
51216+ Here you can choose the GID to disable server socket access for.
51217+ Remember to add the users you want server socket access disabled for to
51218+ the GID specified here. If the sysctl option is enabled, a sysctl
51219+ option with name "socket_server_gid" is created.
51220+
51221+endmenu
51222+menu "Sysctl Support"
51223+depends on GRKERNSEC && SYSCTL
51224+
51225+config GRKERNSEC_SYSCTL
51226+ bool "Sysctl support"
51227+ default y if GRKERNSEC_CONFIG_AUTO
51228+ help
51229+ If you say Y here, you will be able to change the options that
51230+ grsecurity runs with at bootup, without having to recompile your
51231+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
51232+ to enable (1) or disable (0) various features. All the sysctl entries
51233+ are mutable until the "grsec_lock" entry is set to a non-zero value.
51234+ All features enabled in the kernel configuration are disabled at boot
51235+ if you do not say Y to the "Turn on features by default" option.
51236+ All options should be set at startup, and the grsec_lock entry should
51237+ be set to a non-zero value after all the options are set.
51238+ *THIS IS EXTREMELY IMPORTANT*
51239+
51240+config GRKERNSEC_SYSCTL_DISTRO
51241+ bool "Extra sysctl support for distro makers (READ HELP)"
51242+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
51243+ help
51244+ If you say Y here, additional sysctl options will be created
51245+ for features that affect processes running as root. Therefore,
51246+ it is critical when using this option that the grsec_lock entry be
51247+ enabled after boot. Only distros with prebuilt kernel packages
51248+ with this option enabled that can ensure grsec_lock is enabled
51249+ after boot should use this option.
51250+ *Failure to set grsec_lock after boot makes all grsec features
51251+ this option covers useless*
51252+
51253+ Currently this option creates the following sysctl entries:
51254+ "Disable Privileged I/O": "disable_priv_io"
51255+
51256+config GRKERNSEC_SYSCTL_ON
51257+ bool "Turn on features by default"
51258+ default y if GRKERNSEC_CONFIG_AUTO
51259+ depends on GRKERNSEC_SYSCTL
51260+ help
51261+ If you say Y here, instead of having all features enabled in the
51262+ kernel configuration disabled at boot time, the features will be
51263+ enabled at boot time. It is recommended you say Y here unless
51264+ there is some reason you would want all sysctl-tunable features to
51265+ be disabled by default. As mentioned elsewhere, it is important
51266+ to enable the grsec_lock entry once you have finished modifying
51267+ the sysctl entries.
51268+
51269+endmenu
51270+menu "Logging Options"
51271+depends on GRKERNSEC
51272+
51273+config GRKERNSEC_FLOODTIME
51274+ int "Seconds in between log messages (minimum)"
51275+ default 10
51276+ help
51277+ This option allows you to enforce the number of seconds between
51278+ grsecurity log messages. The default should be suitable for most
51279+ people, however, if you choose to change it, choose a value small enough
51280+ to allow informative logs to be produced, but large enough to
51281+ prevent flooding.
51282+
51283+config GRKERNSEC_FLOODBURST
51284+ int "Number of messages in a burst (maximum)"
51285+ default 6
51286+ help
51287+ This option allows you to choose the maximum number of messages allowed
51288+ within the flood time interval you chose in a separate option. The
51289+ default should be suitable for most people, however if you find that
51290+ many of your logs are being interpreted as flooding, you may want to
51291+ raise this value.
51292+
51293+endmenu
51294diff --git a/grsecurity/Makefile b/grsecurity/Makefile
51295new file mode 100644
51296index 0000000..1b9afa9
51297--- /dev/null
51298+++ b/grsecurity/Makefile
51299@@ -0,0 +1,38 @@
51300+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
51301+# during 2001-2009 it has been completely redesigned by Brad Spengler
51302+# into an RBAC system
51303+#
51304+# All code in this directory and various hooks inserted throughout the kernel
51305+# are copyright Brad Spengler - Open Source Security, Inc., and released
51306+# under the GPL v2 or higher
51307+
51308+KBUILD_CFLAGS += -Werror
51309+
51310+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
51311+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
51312+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
51313+
51314+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
51315+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
51316+ gracl_learn.o grsec_log.o
51317+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
51318+
51319+ifdef CONFIG_NET
51320+obj-y += grsec_sock.o
51321+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
51322+endif
51323+
51324+ifndef CONFIG_GRKERNSEC
51325+obj-y += grsec_disabled.o
51326+endif
51327+
51328+ifdef CONFIG_GRKERNSEC_HIDESYM
51329+extra-y := grsec_hidesym.o
51330+$(obj)/grsec_hidesym.o:
51331+ @-chmod -f 500 /boot
51332+ @-chmod -f 500 /lib/modules
51333+ @-chmod -f 500 /lib64/modules
51334+ @-chmod -f 500 /lib32/modules
51335+ @-chmod -f 700 .
51336+ @echo ' grsec: protected kernel image paths'
51337+endif
51338diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
51339new file mode 100644
51340index 0000000..1561617
51341--- /dev/null
51342+++ b/grsecurity/gracl.c
51343@@ -0,0 +1,4017 @@
51344+#include <linux/kernel.h>
51345+#include <linux/module.h>
51346+#include <linux/sched.h>
51347+#include <linux/mm.h>
51348+#include <linux/file.h>
51349+#include <linux/fs.h>
51350+#include <linux/namei.h>
51351+#include <linux/mount.h>
51352+#include <linux/tty.h>
51353+#include <linux/proc_fs.h>
51354+#include <linux/lglock.h>
51355+#include <linux/slab.h>
51356+#include <linux/vmalloc.h>
51357+#include <linux/types.h>
51358+#include <linux/sysctl.h>
51359+#include <linux/netdevice.h>
51360+#include <linux/ptrace.h>
51361+#include <linux/gracl.h>
51362+#include <linux/gralloc.h>
51363+#include <linux/security.h>
51364+#include <linux/grinternal.h>
51365+#include <linux/pid_namespace.h>
51366+#include <linux/stop_machine.h>
51367+#include <linux/fdtable.h>
51368+#include <linux/percpu.h>
51369+#include <linux/lglock.h>
51370+#include "../fs/mount.h"
51371+
51372+#include <asm/uaccess.h>
51373+#include <asm/errno.h>
51374+#include <asm/mman.h>
51375+
51376+extern struct lglock vfsmount_lock;
51377+
51378+static struct acl_role_db acl_role_set;
51379+static struct name_db name_set;
51380+static struct inodev_db inodev_set;
51381+
51382+/* for keeping track of userspace pointers used for subjects, so we
51383+ can share references in the kernel as well
51384+*/
51385+
51386+static struct path real_root;
51387+
51388+static struct acl_subj_map_db subj_map_set;
51389+
51390+static struct acl_role_label *default_role;
51391+
51392+static struct acl_role_label *role_list;
51393+
51394+static u16 acl_sp_role_value;
51395+
51396+extern char *gr_shared_page[4];
51397+static DEFINE_MUTEX(gr_dev_mutex);
51398+DEFINE_RWLOCK(gr_inode_lock);
51399+
51400+struct gr_arg *gr_usermode;
51401+
51402+static unsigned int gr_status __read_only = GR_STATUS_INIT;
51403+
51404+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
51405+extern void gr_clear_learn_entries(void);
51406+
51407+#ifdef CONFIG_GRKERNSEC_RESLOG
51408+extern void gr_log_resource(const struct task_struct *task,
51409+ const int res, const unsigned long wanted, const int gt);
51410+#endif
51411+
51412+unsigned char *gr_system_salt;
51413+unsigned char *gr_system_sum;
51414+
51415+static struct sprole_pw **acl_special_roles = NULL;
51416+static __u16 num_sprole_pws = 0;
51417+
51418+static struct acl_role_label *kernel_role = NULL;
51419+
51420+static unsigned int gr_auth_attempts = 0;
51421+static unsigned long gr_auth_expires = 0UL;
51422+
51423+#ifdef CONFIG_NET
51424+extern struct vfsmount *sock_mnt;
51425+#endif
51426+
51427+extern struct vfsmount *pipe_mnt;
51428+extern struct vfsmount *shm_mnt;
51429+#ifdef CONFIG_HUGETLBFS
51430+extern struct vfsmount *hugetlbfs_vfsmount;
51431+#endif
51432+
51433+static struct acl_object_label *fakefs_obj_rw;
51434+static struct acl_object_label *fakefs_obj_rwx;
51435+
51436+extern int gr_init_uidset(void);
51437+extern void gr_free_uidset(void);
51438+extern void gr_remove_uid(uid_t uid);
51439+extern int gr_find_uid(uid_t uid);
51440+
51441+__inline__ int
51442+gr_acl_is_enabled(void)
51443+{
51444+ return (gr_status & GR_READY);
51445+}
51446+
51447+#ifdef CONFIG_BTRFS_FS
51448+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
51449+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
51450+#endif
51451+
51452+static inline dev_t __get_dev(const struct dentry *dentry)
51453+{
51454+#ifdef CONFIG_BTRFS_FS
51455+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
51456+ return get_btrfs_dev_from_inode(dentry->d_inode);
51457+ else
51458+#endif
51459+ return dentry->d_inode->i_sb->s_dev;
51460+}
51461+
51462+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
51463+{
51464+ return __get_dev(dentry);
51465+}
51466+
51467+static char gr_task_roletype_to_char(struct task_struct *task)
51468+{
51469+ switch (task->role->roletype &
51470+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
51471+ GR_ROLE_SPECIAL)) {
51472+ case GR_ROLE_DEFAULT:
51473+ return 'D';
51474+ case GR_ROLE_USER:
51475+ return 'U';
51476+ case GR_ROLE_GROUP:
51477+ return 'G';
51478+ case GR_ROLE_SPECIAL:
51479+ return 'S';
51480+ }
51481+
51482+ return 'X';
51483+}
51484+
51485+char gr_roletype_to_char(void)
51486+{
51487+ return gr_task_roletype_to_char(current);
51488+}
51489+
51490+__inline__ int
51491+gr_acl_tpe_check(void)
51492+{
51493+ if (unlikely(!(gr_status & GR_READY)))
51494+ return 0;
51495+ if (current->role->roletype & GR_ROLE_TPE)
51496+ return 1;
51497+ else
51498+ return 0;
51499+}
51500+
51501+int
51502+gr_handle_rawio(const struct inode *inode)
51503+{
51504+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51505+ if (inode && S_ISBLK(inode->i_mode) &&
51506+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
51507+ !capable(CAP_SYS_RAWIO))
51508+ return 1;
51509+#endif
51510+ return 0;
51511+}
51512+
51513+static int
51514+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
51515+{
51516+ if (likely(lena != lenb))
51517+ return 0;
51518+
51519+ return !memcmp(a, b, lena);
51520+}
51521+
51522+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
51523+{
51524+ *buflen -= namelen;
51525+ if (*buflen < 0)
51526+ return -ENAMETOOLONG;
51527+ *buffer -= namelen;
51528+ memcpy(*buffer, str, namelen);
51529+ return 0;
51530+}
51531+
51532+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
51533+{
51534+ return prepend(buffer, buflen, name->name, name->len);
51535+}
51536+
51537+static int prepend_path(const struct path *path, struct path *root,
51538+ char **buffer, int *buflen)
51539+{
51540+ struct dentry *dentry = path->dentry;
51541+ struct vfsmount *vfsmnt = path->mnt;
51542+ struct mount *mnt = real_mount(vfsmnt);
51543+ bool slash = false;
51544+ int error = 0;
51545+
51546+ while (dentry != root->dentry || vfsmnt != root->mnt) {
51547+ struct dentry * parent;
51548+
51549+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
51550+ /* Global root? */
51551+ if (!mnt_has_parent(mnt)) {
51552+ goto out;
51553+ }
51554+ dentry = mnt->mnt_mountpoint;
51555+ mnt = mnt->mnt_parent;
51556+ vfsmnt = &mnt->mnt;
51557+ continue;
51558+ }
51559+ parent = dentry->d_parent;
51560+ prefetch(parent);
51561+ spin_lock(&dentry->d_lock);
51562+ error = prepend_name(buffer, buflen, &dentry->d_name);
51563+ spin_unlock(&dentry->d_lock);
51564+ if (!error)
51565+ error = prepend(buffer, buflen, "/", 1);
51566+ if (error)
51567+ break;
51568+
51569+ slash = true;
51570+ dentry = parent;
51571+ }
51572+
51573+out:
51574+ if (!error && !slash)
51575+ error = prepend(buffer, buflen, "/", 1);
51576+
51577+ return error;
51578+}
51579+
51580+/* this must be called with vfsmount_lock and rename_lock held */
51581+
51582+static char *__our_d_path(const struct path *path, struct path *root,
51583+ char *buf, int buflen)
51584+{
51585+ char *res = buf + buflen;
51586+ int error;
51587+
51588+ prepend(&res, &buflen, "\0", 1);
51589+ error = prepend_path(path, root, &res, &buflen);
51590+ if (error)
51591+ return ERR_PTR(error);
51592+
51593+ return res;
51594+}
51595+
51596+static char *
51597+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
51598+{
51599+ char *retval;
51600+
51601+ retval = __our_d_path(path, root, buf, buflen);
51602+ if (unlikely(IS_ERR(retval)))
51603+ retval = strcpy(buf, "<path too long>");
51604+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
51605+ retval[1] = '\0';
51606+
51607+ return retval;
51608+}
51609+
51610+static char *
51611+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51612+ char *buf, int buflen)
51613+{
51614+ struct path path;
51615+ char *res;
51616+
51617+ path.dentry = (struct dentry *)dentry;
51618+ path.mnt = (struct vfsmount *)vfsmnt;
51619+
51620+ /* we can use real_root.dentry, real_root.mnt, because this is only called
51621+ by the RBAC system */
51622+ res = gen_full_path(&path, &real_root, buf, buflen);
51623+
51624+ return res;
51625+}
51626+
51627+static char *
51628+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51629+ char *buf, int buflen)
51630+{
51631+ char *res;
51632+ struct path path;
51633+ struct path root;
51634+ struct task_struct *reaper = init_pid_ns.child_reaper;
51635+
51636+ path.dentry = (struct dentry *)dentry;
51637+ path.mnt = (struct vfsmount *)vfsmnt;
51638+
51639+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
51640+ get_fs_root(reaper->fs, &root);
51641+
51642+ write_seqlock(&rename_lock);
51643+ br_read_lock(&vfsmount_lock);
51644+ res = gen_full_path(&path, &root, buf, buflen);
51645+ br_read_unlock(&vfsmount_lock);
51646+ write_sequnlock(&rename_lock);
51647+
51648+ path_put(&root);
51649+ return res;
51650+}
51651+
51652+static char *
51653+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51654+{
51655+ char *ret;
51656+ write_seqlock(&rename_lock);
51657+ br_read_lock(&vfsmount_lock);
51658+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51659+ PAGE_SIZE);
51660+ br_read_unlock(&vfsmount_lock);
51661+ write_sequnlock(&rename_lock);
51662+ return ret;
51663+}
51664+
51665+static char *
51666+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51667+{
51668+ char *ret;
51669+ char *buf;
51670+ int buflen;
51671+
51672+ write_seqlock(&rename_lock);
51673+ br_read_lock(&vfsmount_lock);
51674+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51675+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
51676+ buflen = (int)(ret - buf);
51677+ if (buflen >= 5)
51678+ prepend(&ret, &buflen, "/proc", 5);
51679+ else
51680+ ret = strcpy(buf, "<path too long>");
51681+ br_read_unlock(&vfsmount_lock);
51682+ write_sequnlock(&rename_lock);
51683+ return ret;
51684+}
51685+
51686+char *
51687+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
51688+{
51689+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51690+ PAGE_SIZE);
51691+}
51692+
51693+char *
51694+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
51695+{
51696+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51697+ PAGE_SIZE);
51698+}
51699+
51700+char *
51701+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
51702+{
51703+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
51704+ PAGE_SIZE);
51705+}
51706+
51707+char *
51708+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
51709+{
51710+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
51711+ PAGE_SIZE);
51712+}
51713+
51714+char *
51715+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
51716+{
51717+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
51718+ PAGE_SIZE);
51719+}
51720+
51721+__inline__ __u32
51722+to_gr_audit(const __u32 reqmode)
51723+{
51724+ /* masks off auditable permission flags, then shifts them to create
51725+ auditing flags, and adds the special case of append auditing if
51726+ we're requesting write */
51727+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
51728+}
51729+
51730+struct acl_subject_label *
51731+lookup_subject_map(const struct acl_subject_label *userp)
51732+{
51733+ unsigned int index = shash(userp, subj_map_set.s_size);
51734+ struct subject_map *match;
51735+
51736+ match = subj_map_set.s_hash[index];
51737+
51738+ while (match && match->user != userp)
51739+ match = match->next;
51740+
51741+ if (match != NULL)
51742+ return match->kernel;
51743+ else
51744+ return NULL;
51745+}
51746+
51747+static void
51748+insert_subj_map_entry(struct subject_map *subjmap)
51749+{
51750+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
51751+ struct subject_map **curr;
51752+
51753+ subjmap->prev = NULL;
51754+
51755+ curr = &subj_map_set.s_hash[index];
51756+ if (*curr != NULL)
51757+ (*curr)->prev = subjmap;
51758+
51759+ subjmap->next = *curr;
51760+ *curr = subjmap;
51761+
51762+ return;
51763+}
51764+
51765+static struct acl_role_label *
51766+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
51767+ const gid_t gid)
51768+{
51769+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
51770+ struct acl_role_label *match;
51771+ struct role_allowed_ip *ipp;
51772+ unsigned int x;
51773+ u32 curr_ip = task->signal->curr_ip;
51774+
51775+ task->signal->saved_ip = curr_ip;
51776+
51777+ match = acl_role_set.r_hash[index];
51778+
51779+ while (match) {
51780+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
51781+ for (x = 0; x < match->domain_child_num; x++) {
51782+ if (match->domain_children[x] == uid)
51783+ goto found;
51784+ }
51785+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51786+ break;
51787+ match = match->next;
51788+ }
51789+found:
51790+ if (match == NULL) {
51791+ try_group:
51792+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51793+ match = acl_role_set.r_hash[index];
51794+
51795+ while (match) {
51796+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51797+ for (x = 0; x < match->domain_child_num; x++) {
51798+ if (match->domain_children[x] == gid)
51799+ goto found2;
51800+ }
51801+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51802+ break;
51803+ match = match->next;
51804+ }
51805+found2:
51806+ if (match == NULL)
51807+ match = default_role;
51808+ if (match->allowed_ips == NULL)
51809+ return match;
51810+ else {
51811+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51812+ if (likely
51813+ ((ntohl(curr_ip) & ipp->netmask) ==
51814+ (ntohl(ipp->addr) & ipp->netmask)))
51815+ return match;
51816+ }
51817+ match = default_role;
51818+ }
51819+ } else if (match->allowed_ips == NULL) {
51820+ return match;
51821+ } else {
51822+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51823+ if (likely
51824+ ((ntohl(curr_ip) & ipp->netmask) ==
51825+ (ntohl(ipp->addr) & ipp->netmask)))
51826+ return match;
51827+ }
51828+ goto try_group;
51829+ }
51830+
51831+ return match;
51832+}
51833+
51834+struct acl_subject_label *
51835+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51836+ const struct acl_role_label *role)
51837+{
51838+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51839+ struct acl_subject_label *match;
51840+
51841+ match = role->subj_hash[index];
51842+
51843+ while (match && (match->inode != ino || match->device != dev ||
51844+ (match->mode & GR_DELETED))) {
51845+ match = match->next;
51846+ }
51847+
51848+ if (match && !(match->mode & GR_DELETED))
51849+ return match;
51850+ else
51851+ return NULL;
51852+}
51853+
51854+struct acl_subject_label *
51855+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51856+ const struct acl_role_label *role)
51857+{
51858+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51859+ struct acl_subject_label *match;
51860+
51861+ match = role->subj_hash[index];
51862+
51863+ while (match && (match->inode != ino || match->device != dev ||
51864+ !(match->mode & GR_DELETED))) {
51865+ match = match->next;
51866+ }
51867+
51868+ if (match && (match->mode & GR_DELETED))
51869+ return match;
51870+ else
51871+ return NULL;
51872+}
51873+
51874+static struct acl_object_label *
51875+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51876+ const struct acl_subject_label *subj)
51877+{
51878+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51879+ struct acl_object_label *match;
51880+
51881+ match = subj->obj_hash[index];
51882+
51883+ while (match && (match->inode != ino || match->device != dev ||
51884+ (match->mode & GR_DELETED))) {
51885+ match = match->next;
51886+ }
51887+
51888+ if (match && !(match->mode & GR_DELETED))
51889+ return match;
51890+ else
51891+ return NULL;
51892+}
51893+
51894+static struct acl_object_label *
51895+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51896+ const struct acl_subject_label *subj)
51897+{
51898+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51899+ struct acl_object_label *match;
51900+
51901+ match = subj->obj_hash[index];
51902+
51903+ while (match && (match->inode != ino || match->device != dev ||
51904+ !(match->mode & GR_DELETED))) {
51905+ match = match->next;
51906+ }
51907+
51908+ if (match && (match->mode & GR_DELETED))
51909+ return match;
51910+
51911+ match = subj->obj_hash[index];
51912+
51913+ while (match && (match->inode != ino || match->device != dev ||
51914+ (match->mode & GR_DELETED))) {
51915+ match = match->next;
51916+ }
51917+
51918+ if (match && !(match->mode & GR_DELETED))
51919+ return match;
51920+ else
51921+ return NULL;
51922+}
51923+
51924+static struct name_entry *
51925+lookup_name_entry(const char *name)
51926+{
51927+ unsigned int len = strlen(name);
51928+ unsigned int key = full_name_hash(name, len);
51929+ unsigned int index = key % name_set.n_size;
51930+ struct name_entry *match;
51931+
51932+ match = name_set.n_hash[index];
51933+
51934+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51935+ match = match->next;
51936+
51937+ return match;
51938+}
51939+
51940+static struct name_entry *
51941+lookup_name_entry_create(const char *name)
51942+{
51943+ unsigned int len = strlen(name);
51944+ unsigned int key = full_name_hash(name, len);
51945+ unsigned int index = key % name_set.n_size;
51946+ struct name_entry *match;
51947+
51948+ match = name_set.n_hash[index];
51949+
51950+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51951+ !match->deleted))
51952+ match = match->next;
51953+
51954+ if (match && match->deleted)
51955+ return match;
51956+
51957+ match = name_set.n_hash[index];
51958+
51959+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51960+ match->deleted))
51961+ match = match->next;
51962+
51963+ if (match && !match->deleted)
51964+ return match;
51965+ else
51966+ return NULL;
51967+}
51968+
51969+static struct inodev_entry *
51970+lookup_inodev_entry(const ino_t ino, const dev_t dev)
51971+{
51972+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
51973+ struct inodev_entry *match;
51974+
51975+ match = inodev_set.i_hash[index];
51976+
51977+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
51978+ match = match->next;
51979+
51980+ return match;
51981+}
51982+
51983+static void
51984+insert_inodev_entry(struct inodev_entry *entry)
51985+{
51986+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
51987+ inodev_set.i_size);
51988+ struct inodev_entry **curr;
51989+
51990+ entry->prev = NULL;
51991+
51992+ curr = &inodev_set.i_hash[index];
51993+ if (*curr != NULL)
51994+ (*curr)->prev = entry;
51995+
51996+ entry->next = *curr;
51997+ *curr = entry;
51998+
51999+ return;
52000+}
52001+
52002+static void
52003+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
52004+{
52005+ unsigned int index =
52006+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
52007+ struct acl_role_label **curr;
52008+ struct acl_role_label *tmp, *tmp2;
52009+
52010+ curr = &acl_role_set.r_hash[index];
52011+
52012+ /* simple case, slot is empty, just set it to our role */
52013+ if (*curr == NULL) {
52014+ *curr = role;
52015+ } else {
52016+ /* example:
52017+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
52018+ 2 -> 3
52019+ */
52020+ /* first check to see if we can already be reached via this slot */
52021+ tmp = *curr;
52022+ while (tmp && tmp != role)
52023+ tmp = tmp->next;
52024+ if (tmp == role) {
52025+ /* we don't need to add ourselves to this slot's chain */
52026+ return;
52027+ }
52028+ /* we need to add ourselves to this chain, two cases */
52029+ if (role->next == NULL) {
52030+ /* simple case, append the current chain to our role */
52031+ role->next = *curr;
52032+ *curr = role;
52033+ } else {
52034+ /* 1 -> 2 -> 3 -> 4
52035+ 2 -> 3 -> 4
52036+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
52037+ */
52038+ /* trickier case: walk our role's chain until we find
52039+ the role for the start of the current slot's chain */
52040+ tmp = role;
52041+ tmp2 = *curr;
52042+ while (tmp->next && tmp->next != tmp2)
52043+ tmp = tmp->next;
52044+ if (tmp->next == tmp2) {
52045+ /* from example above, we found 3, so just
52046+ replace this slot's chain with ours */
52047+ *curr = role;
52048+ } else {
52049+ /* we didn't find a subset of our role's chain
52050+ in the current slot's chain, so append their
52051+ chain to ours, and set us as the first role in
52052+ the slot's chain
52053+
52054+ we could fold this case with the case above,
52055+ but making it explicit for clarity
52056+ */
52057+ tmp->next = tmp2;
52058+ *curr = role;
52059+ }
52060+ }
52061+ }
52062+
52063+ return;
52064+}
52065+
52066+static void
52067+insert_acl_role_label(struct acl_role_label *role)
52068+{
52069+ int i;
52070+
52071+ if (role_list == NULL) {
52072+ role_list = role;
52073+ role->prev = NULL;
52074+ } else {
52075+ role->prev = role_list;
52076+ role_list = role;
52077+ }
52078+
52079+ /* used for hash chains */
52080+ role->next = NULL;
52081+
52082+ if (role->roletype & GR_ROLE_DOMAIN) {
52083+ for (i = 0; i < role->domain_child_num; i++)
52084+ __insert_acl_role_label(role, role->domain_children[i]);
52085+ } else
52086+ __insert_acl_role_label(role, role->uidgid);
52087+}
52088+
52089+static int
52090+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
52091+{
52092+ struct name_entry **curr, *nentry;
52093+ struct inodev_entry *ientry;
52094+ unsigned int len = strlen(name);
52095+ unsigned int key = full_name_hash(name, len);
52096+ unsigned int index = key % name_set.n_size;
52097+
52098+ curr = &name_set.n_hash[index];
52099+
52100+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
52101+ curr = &((*curr)->next);
52102+
52103+ if (*curr != NULL)
52104+ return 1;
52105+
52106+ nentry = acl_alloc(sizeof (struct name_entry));
52107+ if (nentry == NULL)
52108+ return 0;
52109+ ientry = acl_alloc(sizeof (struct inodev_entry));
52110+ if (ientry == NULL)
52111+ return 0;
52112+ ientry->nentry = nentry;
52113+
52114+ nentry->key = key;
52115+ nentry->name = name;
52116+ nentry->inode = inode;
52117+ nentry->device = device;
52118+ nentry->len = len;
52119+ nentry->deleted = deleted;
52120+
52121+ nentry->prev = NULL;
52122+ curr = &name_set.n_hash[index];
52123+ if (*curr != NULL)
52124+ (*curr)->prev = nentry;
52125+ nentry->next = *curr;
52126+ *curr = nentry;
52127+
52128+ /* insert us into the table searchable by inode/dev */
52129+ insert_inodev_entry(ientry);
52130+
52131+ return 1;
52132+}
52133+
52134+static void
52135+insert_acl_obj_label(struct acl_object_label *obj,
52136+ struct acl_subject_label *subj)
52137+{
52138+ unsigned int index =
52139+ fhash(obj->inode, obj->device, subj->obj_hash_size);
52140+ struct acl_object_label **curr;
52141+
52142+
52143+ obj->prev = NULL;
52144+
52145+ curr = &subj->obj_hash[index];
52146+ if (*curr != NULL)
52147+ (*curr)->prev = obj;
52148+
52149+ obj->next = *curr;
52150+ *curr = obj;
52151+
52152+ return;
52153+}
52154+
52155+static void
52156+insert_acl_subj_label(struct acl_subject_label *obj,
52157+ struct acl_role_label *role)
52158+{
52159+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
52160+ struct acl_subject_label **curr;
52161+
52162+ obj->prev = NULL;
52163+
52164+ curr = &role->subj_hash[index];
52165+ if (*curr != NULL)
52166+ (*curr)->prev = obj;
52167+
52168+ obj->next = *curr;
52169+ *curr = obj;
52170+
52171+ return;
52172+}
52173+
52174+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
52175+
52176+static void *
52177+create_table(__u32 * len, int elementsize)
52178+{
52179+ unsigned int table_sizes[] = {
52180+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
52181+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
52182+ 4194301, 8388593, 16777213, 33554393, 67108859
52183+ };
52184+ void *newtable = NULL;
52185+ unsigned int pwr = 0;
52186+
52187+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
52188+ table_sizes[pwr] <= *len)
52189+ pwr++;
52190+
52191+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
52192+ return newtable;
52193+
52194+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
52195+ newtable =
52196+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
52197+ else
52198+ newtable = vmalloc(table_sizes[pwr] * elementsize);
52199+
52200+ *len = table_sizes[pwr];
52201+
52202+ return newtable;
52203+}
52204+
52205+static int
52206+init_variables(const struct gr_arg *arg)
52207+{
52208+ struct task_struct *reaper = init_pid_ns.child_reaper;
52209+ unsigned int stacksize;
52210+
52211+ subj_map_set.s_size = arg->role_db.num_subjects;
52212+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
52213+ name_set.n_size = arg->role_db.num_objects;
52214+ inodev_set.i_size = arg->role_db.num_objects;
52215+
52216+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
52217+ !name_set.n_size || !inodev_set.i_size)
52218+ return 1;
52219+
52220+ if (!gr_init_uidset())
52221+ return 1;
52222+
52223+ /* set up the stack that holds allocation info */
52224+
52225+ stacksize = arg->role_db.num_pointers + 5;
52226+
52227+ if (!acl_alloc_stack_init(stacksize))
52228+ return 1;
52229+
52230+ /* grab reference for the real root dentry and vfsmount */
52231+ get_fs_root(reaper->fs, &real_root);
52232+
52233+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52234+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
52235+#endif
52236+
52237+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
52238+ if (fakefs_obj_rw == NULL)
52239+ return 1;
52240+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
52241+
52242+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
52243+ if (fakefs_obj_rwx == NULL)
52244+ return 1;
52245+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
52246+
52247+ subj_map_set.s_hash =
52248+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
52249+ acl_role_set.r_hash =
52250+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
52251+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
52252+ inodev_set.i_hash =
52253+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
52254+
52255+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
52256+ !name_set.n_hash || !inodev_set.i_hash)
52257+ return 1;
52258+
52259+ memset(subj_map_set.s_hash, 0,
52260+ sizeof(struct subject_map *) * subj_map_set.s_size);
52261+ memset(acl_role_set.r_hash, 0,
52262+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
52263+ memset(name_set.n_hash, 0,
52264+ sizeof (struct name_entry *) * name_set.n_size);
52265+ memset(inodev_set.i_hash, 0,
52266+ sizeof (struct inodev_entry *) * inodev_set.i_size);
52267+
52268+ return 0;
52269+}
52270+
52271+/* free information not needed after startup
52272+ currently contains user->kernel pointer mappings for subjects
52273+*/
52274+
52275+static void
52276+free_init_variables(void)
52277+{
52278+ __u32 i;
52279+
52280+ if (subj_map_set.s_hash) {
52281+ for (i = 0; i < subj_map_set.s_size; i++) {
52282+ if (subj_map_set.s_hash[i]) {
52283+ kfree(subj_map_set.s_hash[i]);
52284+ subj_map_set.s_hash[i] = NULL;
52285+ }
52286+ }
52287+
52288+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
52289+ PAGE_SIZE)
52290+ kfree(subj_map_set.s_hash);
52291+ else
52292+ vfree(subj_map_set.s_hash);
52293+ }
52294+
52295+ return;
52296+}
52297+
52298+static void
52299+free_variables(void)
52300+{
52301+ struct acl_subject_label *s;
52302+ struct acl_role_label *r;
52303+ struct task_struct *task, *task2;
52304+ unsigned int x;
52305+
52306+ gr_clear_learn_entries();
52307+
52308+ read_lock(&tasklist_lock);
52309+ do_each_thread(task2, task) {
52310+ task->acl_sp_role = 0;
52311+ task->acl_role_id = 0;
52312+ task->acl = NULL;
52313+ task->role = NULL;
52314+ } while_each_thread(task2, task);
52315+ read_unlock(&tasklist_lock);
52316+
52317+ /* release the reference to the real root dentry and vfsmount */
52318+ path_put(&real_root);
52319+ memset(&real_root, 0, sizeof(real_root));
52320+
52321+ /* free all object hash tables */
52322+
52323+ FOR_EACH_ROLE_START(r)
52324+ if (r->subj_hash == NULL)
52325+ goto next_role;
52326+ FOR_EACH_SUBJECT_START(r, s, x)
52327+ if (s->obj_hash == NULL)
52328+ break;
52329+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
52330+ kfree(s->obj_hash);
52331+ else
52332+ vfree(s->obj_hash);
52333+ FOR_EACH_SUBJECT_END(s, x)
52334+ FOR_EACH_NESTED_SUBJECT_START(r, s)
52335+ if (s->obj_hash == NULL)
52336+ break;
52337+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
52338+ kfree(s->obj_hash);
52339+ else
52340+ vfree(s->obj_hash);
52341+ FOR_EACH_NESTED_SUBJECT_END(s)
52342+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
52343+ kfree(r->subj_hash);
52344+ else
52345+ vfree(r->subj_hash);
52346+ r->subj_hash = NULL;
52347+next_role:
52348+ FOR_EACH_ROLE_END(r)
52349+
52350+ acl_free_all();
52351+
52352+ if (acl_role_set.r_hash) {
52353+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
52354+ PAGE_SIZE)
52355+ kfree(acl_role_set.r_hash);
52356+ else
52357+ vfree(acl_role_set.r_hash);
52358+ }
52359+ if (name_set.n_hash) {
52360+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
52361+ PAGE_SIZE)
52362+ kfree(name_set.n_hash);
52363+ else
52364+ vfree(name_set.n_hash);
52365+ }
52366+
52367+ if (inodev_set.i_hash) {
52368+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
52369+ PAGE_SIZE)
52370+ kfree(inodev_set.i_hash);
52371+ else
52372+ vfree(inodev_set.i_hash);
52373+ }
52374+
52375+ gr_free_uidset();
52376+
52377+ memset(&name_set, 0, sizeof (struct name_db));
52378+ memset(&inodev_set, 0, sizeof (struct inodev_db));
52379+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
52380+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
52381+
52382+ default_role = NULL;
52383+ kernel_role = NULL;
52384+ role_list = NULL;
52385+
52386+ return;
52387+}
52388+
52389+static __u32
52390+count_user_objs(struct acl_object_label *userp)
52391+{
52392+ struct acl_object_label o_tmp;
52393+ __u32 num = 0;
52394+
52395+ while (userp) {
52396+ if (copy_from_user(&o_tmp, userp,
52397+ sizeof (struct acl_object_label)))
52398+ break;
52399+
52400+ userp = o_tmp.prev;
52401+ num++;
52402+ }
52403+
52404+ return num;
52405+}
52406+
52407+static struct acl_subject_label *
52408+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
52409+
52410+static int
52411+copy_user_glob(struct acl_object_label *obj)
52412+{
52413+ struct acl_object_label *g_tmp, **guser;
52414+ unsigned int len;
52415+ char *tmp;
52416+
52417+ if (obj->globbed == NULL)
52418+ return 0;
52419+
52420+ guser = &obj->globbed;
52421+ while (*guser) {
52422+ g_tmp = (struct acl_object_label *)
52423+ acl_alloc(sizeof (struct acl_object_label));
52424+ if (g_tmp == NULL)
52425+ return -ENOMEM;
52426+
52427+ if (copy_from_user(g_tmp, *guser,
52428+ sizeof (struct acl_object_label)))
52429+ return -EFAULT;
52430+
52431+ len = strnlen_user(g_tmp->filename, PATH_MAX);
52432+
52433+ if (!len || len >= PATH_MAX)
52434+ return -EINVAL;
52435+
52436+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52437+ return -ENOMEM;
52438+
52439+ if (copy_from_user(tmp, g_tmp->filename, len))
52440+ return -EFAULT;
52441+ tmp[len-1] = '\0';
52442+ g_tmp->filename = tmp;
52443+
52444+ *guser = g_tmp;
52445+ guser = &(g_tmp->next);
52446+ }
52447+
52448+ return 0;
52449+}
52450+
52451+static int
52452+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
52453+ struct acl_role_label *role)
52454+{
52455+ struct acl_object_label *o_tmp;
52456+ unsigned int len;
52457+ int ret;
52458+ char *tmp;
52459+
52460+ while (userp) {
52461+ if ((o_tmp = (struct acl_object_label *)
52462+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
52463+ return -ENOMEM;
52464+
52465+ if (copy_from_user(o_tmp, userp,
52466+ sizeof (struct acl_object_label)))
52467+ return -EFAULT;
52468+
52469+ userp = o_tmp->prev;
52470+
52471+ len = strnlen_user(o_tmp->filename, PATH_MAX);
52472+
52473+ if (!len || len >= PATH_MAX)
52474+ return -EINVAL;
52475+
52476+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52477+ return -ENOMEM;
52478+
52479+ if (copy_from_user(tmp, o_tmp->filename, len))
52480+ return -EFAULT;
52481+ tmp[len-1] = '\0';
52482+ o_tmp->filename = tmp;
52483+
52484+ insert_acl_obj_label(o_tmp, subj);
52485+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
52486+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
52487+ return -ENOMEM;
52488+
52489+ ret = copy_user_glob(o_tmp);
52490+ if (ret)
52491+ return ret;
52492+
52493+ if (o_tmp->nested) {
52494+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
52495+ if (IS_ERR(o_tmp->nested))
52496+ return PTR_ERR(o_tmp->nested);
52497+
52498+ /* insert into nested subject list */
52499+ o_tmp->nested->next = role->hash->first;
52500+ role->hash->first = o_tmp->nested;
52501+ }
52502+ }
52503+
52504+ return 0;
52505+}
52506+
52507+static __u32
52508+count_user_subjs(struct acl_subject_label *userp)
52509+{
52510+ struct acl_subject_label s_tmp;
52511+ __u32 num = 0;
52512+
52513+ while (userp) {
52514+ if (copy_from_user(&s_tmp, userp,
52515+ sizeof (struct acl_subject_label)))
52516+ break;
52517+
52518+ userp = s_tmp.prev;
52519+ /* do not count nested subjects against this count, since
52520+ they are not included in the hash table, but are
52521+ attached to objects. We have already counted
52522+ the subjects in userspace for the allocation
52523+ stack
52524+ */
52525+ if (!(s_tmp.mode & GR_NESTED))
52526+ num++;
52527+ }
52528+
52529+ return num;
52530+}
52531+
52532+static int
52533+copy_user_allowedips(struct acl_role_label *rolep)
52534+{
52535+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
52536+
52537+ ruserip = rolep->allowed_ips;
52538+
52539+ while (ruserip) {
52540+ rlast = rtmp;
52541+
52542+ if ((rtmp = (struct role_allowed_ip *)
52543+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
52544+ return -ENOMEM;
52545+
52546+ if (copy_from_user(rtmp, ruserip,
52547+ sizeof (struct role_allowed_ip)))
52548+ return -EFAULT;
52549+
52550+ ruserip = rtmp->prev;
52551+
52552+ if (!rlast) {
52553+ rtmp->prev = NULL;
52554+ rolep->allowed_ips = rtmp;
52555+ } else {
52556+ rlast->next = rtmp;
52557+ rtmp->prev = rlast;
52558+ }
52559+
52560+ if (!ruserip)
52561+ rtmp->next = NULL;
52562+ }
52563+
52564+ return 0;
52565+}
52566+
52567+static int
52568+copy_user_transitions(struct acl_role_label *rolep)
52569+{
52570+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
52571+
52572+ unsigned int len;
52573+ char *tmp;
52574+
52575+ rusertp = rolep->transitions;
52576+
52577+ while (rusertp) {
52578+ rlast = rtmp;
52579+
52580+ if ((rtmp = (struct role_transition *)
52581+ acl_alloc(sizeof (struct role_transition))) == NULL)
52582+ return -ENOMEM;
52583+
52584+ if (copy_from_user(rtmp, rusertp,
52585+ sizeof (struct role_transition)))
52586+ return -EFAULT;
52587+
52588+ rusertp = rtmp->prev;
52589+
52590+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
52591+
52592+ if (!len || len >= GR_SPROLE_LEN)
52593+ return -EINVAL;
52594+
52595+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52596+ return -ENOMEM;
52597+
52598+ if (copy_from_user(tmp, rtmp->rolename, len))
52599+ return -EFAULT;
52600+ tmp[len-1] = '\0';
52601+ rtmp->rolename = tmp;
52602+
52603+ if (!rlast) {
52604+ rtmp->prev = NULL;
52605+ rolep->transitions = rtmp;
52606+ } else {
52607+ rlast->next = rtmp;
52608+ rtmp->prev = rlast;
52609+ }
52610+
52611+ if (!rusertp)
52612+ rtmp->next = NULL;
52613+ }
52614+
52615+ return 0;
52616+}
52617+
52618+static struct acl_subject_label *
52619+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
52620+{
52621+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
52622+ unsigned int len;
52623+ char *tmp;
52624+ __u32 num_objs;
52625+ struct acl_ip_label **i_tmp, *i_utmp2;
52626+ struct gr_hash_struct ghash;
52627+ struct subject_map *subjmap;
52628+ unsigned int i_num;
52629+ int err;
52630+
52631+ s_tmp = lookup_subject_map(userp);
52632+
52633+ /* we've already copied this subject into the kernel, just return
52634+ the reference to it, and don't copy it over again
52635+ */
52636+ if (s_tmp)
52637+ return(s_tmp);
52638+
52639+ if ((s_tmp = (struct acl_subject_label *)
52640+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
52641+ return ERR_PTR(-ENOMEM);
52642+
52643+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
52644+ if (subjmap == NULL)
52645+ return ERR_PTR(-ENOMEM);
52646+
52647+ subjmap->user = userp;
52648+ subjmap->kernel = s_tmp;
52649+ insert_subj_map_entry(subjmap);
52650+
52651+ if (copy_from_user(s_tmp, userp,
52652+ sizeof (struct acl_subject_label)))
52653+ return ERR_PTR(-EFAULT);
52654+
52655+ len = strnlen_user(s_tmp->filename, PATH_MAX);
52656+
52657+ if (!len || len >= PATH_MAX)
52658+ return ERR_PTR(-EINVAL);
52659+
52660+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52661+ return ERR_PTR(-ENOMEM);
52662+
52663+ if (copy_from_user(tmp, s_tmp->filename, len))
52664+ return ERR_PTR(-EFAULT);
52665+ tmp[len-1] = '\0';
52666+ s_tmp->filename = tmp;
52667+
52668+ if (!strcmp(s_tmp->filename, "/"))
52669+ role->root_label = s_tmp;
52670+
52671+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
52672+ return ERR_PTR(-EFAULT);
52673+
52674+ /* copy user and group transition tables */
52675+
52676+ if (s_tmp->user_trans_num) {
52677+ uid_t *uidlist;
52678+
52679+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
52680+ if (uidlist == NULL)
52681+ return ERR_PTR(-ENOMEM);
52682+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
52683+ return ERR_PTR(-EFAULT);
52684+
52685+ s_tmp->user_transitions = uidlist;
52686+ }
52687+
52688+ if (s_tmp->group_trans_num) {
52689+ gid_t *gidlist;
52690+
52691+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
52692+ if (gidlist == NULL)
52693+ return ERR_PTR(-ENOMEM);
52694+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
52695+ return ERR_PTR(-EFAULT);
52696+
52697+ s_tmp->group_transitions = gidlist;
52698+ }
52699+
52700+ /* set up object hash table */
52701+ num_objs = count_user_objs(ghash.first);
52702+
52703+ s_tmp->obj_hash_size = num_objs;
52704+ s_tmp->obj_hash =
52705+ (struct acl_object_label **)
52706+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
52707+
52708+ if (!s_tmp->obj_hash)
52709+ return ERR_PTR(-ENOMEM);
52710+
52711+ memset(s_tmp->obj_hash, 0,
52712+ s_tmp->obj_hash_size *
52713+ sizeof (struct acl_object_label *));
52714+
52715+ /* add in objects */
52716+ err = copy_user_objs(ghash.first, s_tmp, role);
52717+
52718+ if (err)
52719+ return ERR_PTR(err);
52720+
52721+ /* set pointer for parent subject */
52722+ if (s_tmp->parent_subject) {
52723+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
52724+
52725+ if (IS_ERR(s_tmp2))
52726+ return s_tmp2;
52727+
52728+ s_tmp->parent_subject = s_tmp2;
52729+ }
52730+
52731+ /* add in ip acls */
52732+
52733+ if (!s_tmp->ip_num) {
52734+ s_tmp->ips = NULL;
52735+ goto insert;
52736+ }
52737+
52738+ i_tmp =
52739+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
52740+ sizeof (struct acl_ip_label *));
52741+
52742+ if (!i_tmp)
52743+ return ERR_PTR(-ENOMEM);
52744+
52745+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
52746+ *(i_tmp + i_num) =
52747+ (struct acl_ip_label *)
52748+ acl_alloc(sizeof (struct acl_ip_label));
52749+ if (!*(i_tmp + i_num))
52750+ return ERR_PTR(-ENOMEM);
52751+
52752+ if (copy_from_user
52753+ (&i_utmp2, s_tmp->ips + i_num,
52754+ sizeof (struct acl_ip_label *)))
52755+ return ERR_PTR(-EFAULT);
52756+
52757+ if (copy_from_user
52758+ (*(i_tmp + i_num), i_utmp2,
52759+ sizeof (struct acl_ip_label)))
52760+ return ERR_PTR(-EFAULT);
52761+
52762+ if ((*(i_tmp + i_num))->iface == NULL)
52763+ continue;
52764+
52765+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
52766+ if (!len || len >= IFNAMSIZ)
52767+ return ERR_PTR(-EINVAL);
52768+ tmp = acl_alloc(len);
52769+ if (tmp == NULL)
52770+ return ERR_PTR(-ENOMEM);
52771+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
52772+ return ERR_PTR(-EFAULT);
52773+ (*(i_tmp + i_num))->iface = tmp;
52774+ }
52775+
52776+ s_tmp->ips = i_tmp;
52777+
52778+insert:
52779+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
52780+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
52781+ return ERR_PTR(-ENOMEM);
52782+
52783+ return s_tmp;
52784+}
52785+
52786+static int
52787+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
52788+{
52789+ struct acl_subject_label s_pre;
52790+ struct acl_subject_label * ret;
52791+ int err;
52792+
52793+ while (userp) {
52794+ if (copy_from_user(&s_pre, userp,
52795+ sizeof (struct acl_subject_label)))
52796+ return -EFAULT;
52797+
52798+ /* do not add nested subjects here, add
52799+ while parsing objects
52800+ */
52801+
52802+ if (s_pre.mode & GR_NESTED) {
52803+ userp = s_pre.prev;
52804+ continue;
52805+ }
52806+
52807+ ret = do_copy_user_subj(userp, role);
52808+
52809+ err = PTR_ERR(ret);
52810+ if (IS_ERR(ret))
52811+ return err;
52812+
52813+ insert_acl_subj_label(ret, role);
52814+
52815+ userp = s_pre.prev;
52816+ }
52817+
52818+ return 0;
52819+}
52820+
52821+static int
52822+copy_user_acl(struct gr_arg *arg)
52823+{
52824+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
52825+ struct sprole_pw *sptmp;
52826+ struct gr_hash_struct *ghash;
52827+ uid_t *domainlist;
52828+ unsigned int r_num;
52829+ unsigned int len;
52830+ char *tmp;
52831+ int err = 0;
52832+ __u16 i;
52833+ __u32 num_subjs;
52834+
52835+ /* we need a default and kernel role */
52836+ if (arg->role_db.num_roles < 2)
52837+ return -EINVAL;
52838+
52839+ /* copy special role authentication info from userspace */
52840+
52841+ num_sprole_pws = arg->num_sprole_pws;
52842+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52843+
52844+ if (!acl_special_roles && num_sprole_pws)
52845+ return -ENOMEM;
52846+
52847+ for (i = 0; i < num_sprole_pws; i++) {
52848+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52849+ if (!sptmp)
52850+ return -ENOMEM;
52851+ if (copy_from_user(sptmp, arg->sprole_pws + i,
52852+ sizeof (struct sprole_pw)))
52853+ return -EFAULT;
52854+
52855+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
52856+
52857+ if (!len || len >= GR_SPROLE_LEN)
52858+ return -EINVAL;
52859+
52860+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52861+ return -ENOMEM;
52862+
52863+ if (copy_from_user(tmp, sptmp->rolename, len))
52864+ return -EFAULT;
52865+
52866+ tmp[len-1] = '\0';
52867+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52868+ printk(KERN_ALERT "Copying special role %s\n", tmp);
52869+#endif
52870+ sptmp->rolename = tmp;
52871+ acl_special_roles[i] = sptmp;
52872+ }
52873+
52874+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52875+
52876+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52877+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
52878+
52879+ if (!r_tmp)
52880+ return -ENOMEM;
52881+
52882+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
52883+ sizeof (struct acl_role_label *)))
52884+ return -EFAULT;
52885+
52886+ if (copy_from_user(r_tmp, r_utmp2,
52887+ sizeof (struct acl_role_label)))
52888+ return -EFAULT;
52889+
52890+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52891+
52892+ if (!len || len >= PATH_MAX)
52893+ return -EINVAL;
52894+
52895+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52896+ return -ENOMEM;
52897+
52898+ if (copy_from_user(tmp, r_tmp->rolename, len))
52899+ return -EFAULT;
52900+
52901+ tmp[len-1] = '\0';
52902+ r_tmp->rolename = tmp;
52903+
52904+ if (!strcmp(r_tmp->rolename, "default")
52905+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52906+ default_role = r_tmp;
52907+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52908+ kernel_role = r_tmp;
52909+ }
52910+
52911+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
52912+ return -ENOMEM;
52913+
52914+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
52915+ return -EFAULT;
52916+
52917+ r_tmp->hash = ghash;
52918+
52919+ num_subjs = count_user_subjs(r_tmp->hash->first);
52920+
52921+ r_tmp->subj_hash_size = num_subjs;
52922+ r_tmp->subj_hash =
52923+ (struct acl_subject_label **)
52924+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52925+
52926+ if (!r_tmp->subj_hash)
52927+ return -ENOMEM;
52928+
52929+ err = copy_user_allowedips(r_tmp);
52930+ if (err)
52931+ return err;
52932+
52933+ /* copy domain info */
52934+ if (r_tmp->domain_children != NULL) {
52935+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52936+ if (domainlist == NULL)
52937+ return -ENOMEM;
52938+
52939+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
52940+ return -EFAULT;
52941+
52942+ r_tmp->domain_children = domainlist;
52943+ }
52944+
52945+ err = copy_user_transitions(r_tmp);
52946+ if (err)
52947+ return err;
52948+
52949+ memset(r_tmp->subj_hash, 0,
52950+ r_tmp->subj_hash_size *
52951+ sizeof (struct acl_subject_label *));
52952+
52953+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
52954+
52955+ if (err)
52956+ return err;
52957+
52958+ /* set nested subject list to null */
52959+ r_tmp->hash->first = NULL;
52960+
52961+ insert_acl_role_label(r_tmp);
52962+ }
52963+
52964+ if (default_role == NULL || kernel_role == NULL)
52965+ return -EINVAL;
52966+
52967+ return err;
52968+}
52969+
52970+static int
52971+gracl_init(struct gr_arg *args)
52972+{
52973+ int error = 0;
52974+
52975+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
52976+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
52977+
52978+ if (init_variables(args)) {
52979+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
52980+ error = -ENOMEM;
52981+ free_variables();
52982+ goto out;
52983+ }
52984+
52985+ error = copy_user_acl(args);
52986+ free_init_variables();
52987+ if (error) {
52988+ free_variables();
52989+ goto out;
52990+ }
52991+
52992+ if ((error = gr_set_acls(0))) {
52993+ free_variables();
52994+ goto out;
52995+ }
52996+
52997+ pax_open_kernel();
52998+ gr_status |= GR_READY;
52999+ pax_close_kernel();
53000+
53001+ out:
53002+ return error;
53003+}
53004+
53005+/* derived from glibc fnmatch() 0: match, 1: no match*/
53006+
53007+static int
53008+glob_match(const char *p, const char *n)
53009+{
53010+ char c;
53011+
53012+ while ((c = *p++) != '\0') {
53013+ switch (c) {
53014+ case '?':
53015+ if (*n == '\0')
53016+ return 1;
53017+ else if (*n == '/')
53018+ return 1;
53019+ break;
53020+ case '\\':
53021+ if (*n != c)
53022+ return 1;
53023+ break;
53024+ case '*':
53025+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
53026+ if (*n == '/')
53027+ return 1;
53028+ else if (c == '?') {
53029+ if (*n == '\0')
53030+ return 1;
53031+ else
53032+ ++n;
53033+ }
53034+ }
53035+ if (c == '\0') {
53036+ return 0;
53037+ } else {
53038+ const char *endp;
53039+
53040+ if ((endp = strchr(n, '/')) == NULL)
53041+ endp = n + strlen(n);
53042+
53043+ if (c == '[') {
53044+ for (--p; n < endp; ++n)
53045+ if (!glob_match(p, n))
53046+ return 0;
53047+ } else if (c == '/') {
53048+ while (*n != '\0' && *n != '/')
53049+ ++n;
53050+ if (*n == '/' && !glob_match(p, n + 1))
53051+ return 0;
53052+ } else {
53053+ for (--p; n < endp; ++n)
53054+ if (*n == c && !glob_match(p, n))
53055+ return 0;
53056+ }
53057+
53058+ return 1;
53059+ }
53060+ case '[':
53061+ {
53062+ int not;
53063+ char cold;
53064+
53065+ if (*n == '\0' || *n == '/')
53066+ return 1;
53067+
53068+ not = (*p == '!' || *p == '^');
53069+ if (not)
53070+ ++p;
53071+
53072+ c = *p++;
53073+ for (;;) {
53074+ unsigned char fn = (unsigned char)*n;
53075+
53076+ if (c == '\0')
53077+ return 1;
53078+ else {
53079+ if (c == fn)
53080+ goto matched;
53081+ cold = c;
53082+ c = *p++;
53083+
53084+ if (c == '-' && *p != ']') {
53085+ unsigned char cend = *p++;
53086+
53087+ if (cend == '\0')
53088+ return 1;
53089+
53090+ if (cold <= fn && fn <= cend)
53091+ goto matched;
53092+
53093+ c = *p++;
53094+ }
53095+ }
53096+
53097+ if (c == ']')
53098+ break;
53099+ }
53100+ if (!not)
53101+ return 1;
53102+ break;
53103+ matched:
53104+ while (c != ']') {
53105+ if (c == '\0')
53106+ return 1;
53107+
53108+ c = *p++;
53109+ }
53110+ if (not)
53111+ return 1;
53112+ }
53113+ break;
53114+ default:
53115+ if (c != *n)
53116+ return 1;
53117+ }
53118+
53119+ ++n;
53120+ }
53121+
53122+ if (*n == '\0')
53123+ return 0;
53124+
53125+ if (*n == '/')
53126+ return 0;
53127+
53128+ return 1;
53129+}
53130+
53131+static struct acl_object_label *
53132+chk_glob_label(struct acl_object_label *globbed,
53133+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
53134+{
53135+ struct acl_object_label *tmp;
53136+
53137+ if (*path == NULL)
53138+ *path = gr_to_filename_nolock(dentry, mnt);
53139+
53140+ tmp = globbed;
53141+
53142+ while (tmp) {
53143+ if (!glob_match(tmp->filename, *path))
53144+ return tmp;
53145+ tmp = tmp->next;
53146+ }
53147+
53148+ return NULL;
53149+}
53150+
53151+static struct acl_object_label *
53152+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
53153+ const ino_t curr_ino, const dev_t curr_dev,
53154+ const struct acl_subject_label *subj, char **path, const int checkglob)
53155+{
53156+ struct acl_subject_label *tmpsubj;
53157+ struct acl_object_label *retval;
53158+ struct acl_object_label *retval2;
53159+
53160+ tmpsubj = (struct acl_subject_label *) subj;
53161+ read_lock(&gr_inode_lock);
53162+ do {
53163+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
53164+ if (retval) {
53165+ if (checkglob && retval->globbed) {
53166+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
53167+ if (retval2)
53168+ retval = retval2;
53169+ }
53170+ break;
53171+ }
53172+ } while ((tmpsubj = tmpsubj->parent_subject));
53173+ read_unlock(&gr_inode_lock);
53174+
53175+ return retval;
53176+}
53177+
53178+static __inline__ struct acl_object_label *
53179+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
53180+ struct dentry *curr_dentry,
53181+ const struct acl_subject_label *subj, char **path, const int checkglob)
53182+{
53183+ int newglob = checkglob;
53184+ ino_t inode;
53185+ dev_t device;
53186+
53187+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
53188+ as we don't want a / * rule to match instead of the / object
53189+ don't do this for create lookups that call this function though, since they're looking up
53190+ on the parent and thus need globbing checks on all paths
53191+ */
53192+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
53193+ newglob = GR_NO_GLOB;
53194+
53195+ spin_lock(&curr_dentry->d_lock);
53196+ inode = curr_dentry->d_inode->i_ino;
53197+ device = __get_dev(curr_dentry);
53198+ spin_unlock(&curr_dentry->d_lock);
53199+
53200+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
53201+}
53202+
53203+static struct acl_object_label *
53204+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53205+ const struct acl_subject_label *subj, char *path, const int checkglob)
53206+{
53207+ struct dentry *dentry = (struct dentry *) l_dentry;
53208+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
53209+ struct mount *real_mnt = real_mount(mnt);
53210+ struct acl_object_label *retval;
53211+ struct dentry *parent;
53212+
53213+ write_seqlock(&rename_lock);
53214+ br_read_lock(&vfsmount_lock);
53215+
53216+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
53217+#ifdef CONFIG_NET
53218+ mnt == sock_mnt ||
53219+#endif
53220+#ifdef CONFIG_HUGETLBFS
53221+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
53222+#endif
53223+ /* ignore Eric Biederman */
53224+ IS_PRIVATE(l_dentry->d_inode))) {
53225+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
53226+ goto out;
53227+ }
53228+
53229+ for (;;) {
53230+ if (dentry == real_root.dentry && mnt == real_root.mnt)
53231+ break;
53232+
53233+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
53234+ if (!mnt_has_parent(real_mnt))
53235+ break;
53236+
53237+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53238+ if (retval != NULL)
53239+ goto out;
53240+
53241+ dentry = real_mnt->mnt_mountpoint;
53242+ real_mnt = real_mnt->mnt_parent;
53243+ mnt = &real_mnt->mnt;
53244+ continue;
53245+ }
53246+
53247+ parent = dentry->d_parent;
53248+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53249+ if (retval != NULL)
53250+ goto out;
53251+
53252+ dentry = parent;
53253+ }
53254+
53255+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
53256+
53257+ /* real_root is pinned so we don't have to hold a reference */
53258+ if (retval == NULL)
53259+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
53260+out:
53261+ br_read_unlock(&vfsmount_lock);
53262+ write_sequnlock(&rename_lock);
53263+
53264+ BUG_ON(retval == NULL);
53265+
53266+ return retval;
53267+}
53268+
53269+static __inline__ struct acl_object_label *
53270+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53271+ const struct acl_subject_label *subj)
53272+{
53273+ char *path = NULL;
53274+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
53275+}
53276+
53277+static __inline__ struct acl_object_label *
53278+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53279+ const struct acl_subject_label *subj)
53280+{
53281+ char *path = NULL;
53282+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
53283+}
53284+
53285+static __inline__ struct acl_object_label *
53286+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53287+ const struct acl_subject_label *subj, char *path)
53288+{
53289+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
53290+}
53291+
53292+static struct acl_subject_label *
53293+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
53294+ const struct acl_role_label *role)
53295+{
53296+ struct dentry *dentry = (struct dentry *) l_dentry;
53297+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
53298+ struct mount *real_mnt = real_mount(mnt);
53299+ struct acl_subject_label *retval;
53300+ struct dentry *parent;
53301+
53302+ write_seqlock(&rename_lock);
53303+ br_read_lock(&vfsmount_lock);
53304+
53305+ for (;;) {
53306+ if (dentry == real_root.dentry && mnt == real_root.mnt)
53307+ break;
53308+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
53309+ if (!mnt_has_parent(real_mnt))
53310+ break;
53311+
53312+ spin_lock(&dentry->d_lock);
53313+ read_lock(&gr_inode_lock);
53314+ retval =
53315+ lookup_acl_subj_label(dentry->d_inode->i_ino,
53316+ __get_dev(dentry), role);
53317+ read_unlock(&gr_inode_lock);
53318+ spin_unlock(&dentry->d_lock);
53319+ if (retval != NULL)
53320+ goto out;
53321+
53322+ dentry = real_mnt->mnt_mountpoint;
53323+ real_mnt = real_mnt->mnt_parent;
53324+ mnt = &real_mnt->mnt;
53325+ continue;
53326+ }
53327+
53328+ spin_lock(&dentry->d_lock);
53329+ read_lock(&gr_inode_lock);
53330+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
53331+ __get_dev(dentry), role);
53332+ read_unlock(&gr_inode_lock);
53333+ parent = dentry->d_parent;
53334+ spin_unlock(&dentry->d_lock);
53335+
53336+ if (retval != NULL)
53337+ goto out;
53338+
53339+ dentry = parent;
53340+ }
53341+
53342+ spin_lock(&dentry->d_lock);
53343+ read_lock(&gr_inode_lock);
53344+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
53345+ __get_dev(dentry), role);
53346+ read_unlock(&gr_inode_lock);
53347+ spin_unlock(&dentry->d_lock);
53348+
53349+ if (unlikely(retval == NULL)) {
53350+ /* real_root is pinned, we don't need to hold a reference */
53351+ read_lock(&gr_inode_lock);
53352+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
53353+ __get_dev(real_root.dentry), role);
53354+ read_unlock(&gr_inode_lock);
53355+ }
53356+out:
53357+ br_read_unlock(&vfsmount_lock);
53358+ write_sequnlock(&rename_lock);
53359+
53360+ BUG_ON(retval == NULL);
53361+
53362+ return retval;
53363+}
53364+
53365+static void
53366+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
53367+{
53368+ struct task_struct *task = current;
53369+ const struct cred *cred = current_cred();
53370+
53371+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
53372+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53373+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53374+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
53375+
53376+ return;
53377+}
53378+
53379+static void
53380+gr_log_learn_id_change(const char type, const unsigned int real,
53381+ const unsigned int effective, const unsigned int fs)
53382+{
53383+ struct task_struct *task = current;
53384+ const struct cred *cred = current_cred();
53385+
53386+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
53387+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53388+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53389+ type, real, effective, fs, &task->signal->saved_ip);
53390+
53391+ return;
53392+}
53393+
53394+__u32
53395+gr_search_file(const struct dentry * dentry, const __u32 mode,
53396+ const struct vfsmount * mnt)
53397+{
53398+ __u32 retval = mode;
53399+ struct acl_subject_label *curracl;
53400+ struct acl_object_label *currobj;
53401+
53402+ if (unlikely(!(gr_status & GR_READY)))
53403+ return (mode & ~GR_AUDITS);
53404+
53405+ curracl = current->acl;
53406+
53407+ currobj = chk_obj_label(dentry, mnt, curracl);
53408+ retval = currobj->mode & mode;
53409+
53410+ /* if we're opening a specified transfer file for writing
53411+ (e.g. /dev/initctl), then transfer our role to init
53412+ */
53413+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
53414+ current->role->roletype & GR_ROLE_PERSIST)) {
53415+ struct task_struct *task = init_pid_ns.child_reaper;
53416+
53417+ if (task->role != current->role) {
53418+ task->acl_sp_role = 0;
53419+ task->acl_role_id = current->acl_role_id;
53420+ task->role = current->role;
53421+ rcu_read_lock();
53422+ read_lock(&grsec_exec_file_lock);
53423+ gr_apply_subject_to_task(task);
53424+ read_unlock(&grsec_exec_file_lock);
53425+ rcu_read_unlock();
53426+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
53427+ }
53428+ }
53429+
53430+ if (unlikely
53431+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
53432+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
53433+ __u32 new_mode = mode;
53434+
53435+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53436+
53437+ retval = new_mode;
53438+
53439+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
53440+ new_mode |= GR_INHERIT;
53441+
53442+ if (!(mode & GR_NOLEARN))
53443+ gr_log_learn(dentry, mnt, new_mode);
53444+ }
53445+
53446+ return retval;
53447+}
53448+
53449+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
53450+ const struct dentry *parent,
53451+ const struct vfsmount *mnt)
53452+{
53453+ struct name_entry *match;
53454+ struct acl_object_label *matchpo;
53455+ struct acl_subject_label *curracl;
53456+ char *path;
53457+
53458+ if (unlikely(!(gr_status & GR_READY)))
53459+ return NULL;
53460+
53461+ preempt_disable();
53462+ path = gr_to_filename_rbac(new_dentry, mnt);
53463+ match = lookup_name_entry_create(path);
53464+
53465+ curracl = current->acl;
53466+
53467+ if (match) {
53468+ read_lock(&gr_inode_lock);
53469+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
53470+ read_unlock(&gr_inode_lock);
53471+
53472+ if (matchpo) {
53473+ preempt_enable();
53474+ return matchpo;
53475+ }
53476+ }
53477+
53478+ // lookup parent
53479+
53480+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
53481+
53482+ preempt_enable();
53483+ return matchpo;
53484+}
53485+
53486+__u32
53487+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
53488+ const struct vfsmount * mnt, const __u32 mode)
53489+{
53490+ struct acl_object_label *matchpo;
53491+ __u32 retval;
53492+
53493+ if (unlikely(!(gr_status & GR_READY)))
53494+ return (mode & ~GR_AUDITS);
53495+
53496+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
53497+
53498+ retval = matchpo->mode & mode;
53499+
53500+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
53501+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53502+ __u32 new_mode = mode;
53503+
53504+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53505+
53506+ gr_log_learn(new_dentry, mnt, new_mode);
53507+ return new_mode;
53508+ }
53509+
53510+ return retval;
53511+}
53512+
53513+__u32
53514+gr_check_link(const struct dentry * new_dentry,
53515+ const struct dentry * parent_dentry,
53516+ const struct vfsmount * parent_mnt,
53517+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
53518+{
53519+ struct acl_object_label *obj;
53520+ __u32 oldmode, newmode;
53521+ __u32 needmode;
53522+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
53523+ GR_DELETE | GR_INHERIT;
53524+
53525+ if (unlikely(!(gr_status & GR_READY)))
53526+ return (GR_CREATE | GR_LINK);
53527+
53528+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
53529+ oldmode = obj->mode;
53530+
53531+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
53532+ newmode = obj->mode;
53533+
53534+ needmode = newmode & checkmodes;
53535+
53536+ // old name for hardlink must have at least the permissions of the new name
53537+ if ((oldmode & needmode) != needmode)
53538+ goto bad;
53539+
53540+ // if old name had restrictions/auditing, make sure the new name does as well
53541+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
53542+
53543+ // don't allow hardlinking of suid/sgid/fcapped files without permission
53544+ if (is_privileged_binary(old_dentry))
53545+ needmode |= GR_SETID;
53546+
53547+ if ((newmode & needmode) != needmode)
53548+ goto bad;
53549+
53550+ // enforce minimum permissions
53551+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
53552+ return newmode;
53553+bad:
53554+ needmode = oldmode;
53555+ if (is_privileged_binary(old_dentry))
53556+ needmode |= GR_SETID;
53557+
53558+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
53559+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
53560+ return (GR_CREATE | GR_LINK);
53561+ } else if (newmode & GR_SUPPRESS)
53562+ return GR_SUPPRESS;
53563+ else
53564+ return 0;
53565+}
53566+
53567+int
53568+gr_check_hidden_task(const struct task_struct *task)
53569+{
53570+ if (unlikely(!(gr_status & GR_READY)))
53571+ return 0;
53572+
53573+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
53574+ return 1;
53575+
53576+ return 0;
53577+}
53578+
53579+int
53580+gr_check_protected_task(const struct task_struct *task)
53581+{
53582+ if (unlikely(!(gr_status & GR_READY) || !task))
53583+ return 0;
53584+
53585+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53586+ task->acl != current->acl)
53587+ return 1;
53588+
53589+ return 0;
53590+}
53591+
53592+int
53593+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53594+{
53595+ struct task_struct *p;
53596+ int ret = 0;
53597+
53598+ if (unlikely(!(gr_status & GR_READY) || !pid))
53599+ return ret;
53600+
53601+ read_lock(&tasklist_lock);
53602+ do_each_pid_task(pid, type, p) {
53603+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53604+ p->acl != current->acl) {
53605+ ret = 1;
53606+ goto out;
53607+ }
53608+ } while_each_pid_task(pid, type, p);
53609+out:
53610+ read_unlock(&tasklist_lock);
53611+
53612+ return ret;
53613+}
53614+
53615+void
53616+gr_copy_label(struct task_struct *tsk)
53617+{
53618+ tsk->signal->used_accept = 0;
53619+ tsk->acl_sp_role = 0;
53620+ tsk->acl_role_id = current->acl_role_id;
53621+ tsk->acl = current->acl;
53622+ tsk->role = current->role;
53623+ tsk->signal->curr_ip = current->signal->curr_ip;
53624+ tsk->signal->saved_ip = current->signal->saved_ip;
53625+ if (current->exec_file)
53626+ get_file(current->exec_file);
53627+ tsk->exec_file = current->exec_file;
53628+ tsk->is_writable = current->is_writable;
53629+ if (unlikely(current->signal->used_accept)) {
53630+ current->signal->curr_ip = 0;
53631+ current->signal->saved_ip = 0;
53632+ }
53633+
53634+ return;
53635+}
53636+
53637+static void
53638+gr_set_proc_res(struct task_struct *task)
53639+{
53640+ struct acl_subject_label *proc;
53641+ unsigned short i;
53642+
53643+ proc = task->acl;
53644+
53645+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
53646+ return;
53647+
53648+ for (i = 0; i < RLIM_NLIMITS; i++) {
53649+ if (!(proc->resmask & (1 << i)))
53650+ continue;
53651+
53652+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
53653+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
53654+ }
53655+
53656+ return;
53657+}
53658+
53659+extern int __gr_process_user_ban(struct user_struct *user);
53660+
53661+int
53662+gr_check_user_change(int real, int effective, int fs)
53663+{
53664+ unsigned int i;
53665+ __u16 num;
53666+ uid_t *uidlist;
53667+ int curuid;
53668+ int realok = 0;
53669+ int effectiveok = 0;
53670+ int fsok = 0;
53671+
53672+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
53673+ struct user_struct *user;
53674+
53675+ if (real == -1)
53676+ goto skipit;
53677+
53678+ user = find_user(real);
53679+ if (user == NULL)
53680+ goto skipit;
53681+
53682+ if (__gr_process_user_ban(user)) {
53683+ /* for find_user */
53684+ free_uid(user);
53685+ return 1;
53686+ }
53687+
53688+ /* for find_user */
53689+ free_uid(user);
53690+
53691+skipit:
53692+#endif
53693+
53694+ if (unlikely(!(gr_status & GR_READY)))
53695+ return 0;
53696+
53697+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53698+ gr_log_learn_id_change('u', real, effective, fs);
53699+
53700+ num = current->acl->user_trans_num;
53701+ uidlist = current->acl->user_transitions;
53702+
53703+ if (uidlist == NULL)
53704+ return 0;
53705+
53706+ if (real == -1)
53707+ realok = 1;
53708+ if (effective == -1)
53709+ effectiveok = 1;
53710+ if (fs == -1)
53711+ fsok = 1;
53712+
53713+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
53714+ for (i = 0; i < num; i++) {
53715+ curuid = (int)uidlist[i];
53716+ if (real == curuid)
53717+ realok = 1;
53718+ if (effective == curuid)
53719+ effectiveok = 1;
53720+ if (fs == curuid)
53721+ fsok = 1;
53722+ }
53723+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
53724+ for (i = 0; i < num; i++) {
53725+ curuid = (int)uidlist[i];
53726+ if (real == curuid)
53727+ break;
53728+ if (effective == curuid)
53729+ break;
53730+ if (fs == curuid)
53731+ break;
53732+ }
53733+ /* not in deny list */
53734+ if (i == num) {
53735+ realok = 1;
53736+ effectiveok = 1;
53737+ fsok = 1;
53738+ }
53739+ }
53740+
53741+ if (realok && effectiveok && fsok)
53742+ return 0;
53743+ else {
53744+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53745+ return 1;
53746+ }
53747+}
53748+
53749+int
53750+gr_check_group_change(int real, int effective, int fs)
53751+{
53752+ unsigned int i;
53753+ __u16 num;
53754+ gid_t *gidlist;
53755+ int curgid;
53756+ int realok = 0;
53757+ int effectiveok = 0;
53758+ int fsok = 0;
53759+
53760+ if (unlikely(!(gr_status & GR_READY)))
53761+ return 0;
53762+
53763+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53764+ gr_log_learn_id_change('g', real, effective, fs);
53765+
53766+ num = current->acl->group_trans_num;
53767+ gidlist = current->acl->group_transitions;
53768+
53769+ if (gidlist == NULL)
53770+ return 0;
53771+
53772+ if (real == -1)
53773+ realok = 1;
53774+ if (effective == -1)
53775+ effectiveok = 1;
53776+ if (fs == -1)
53777+ fsok = 1;
53778+
53779+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
53780+ for (i = 0; i < num; i++) {
53781+ curgid = (int)gidlist[i];
53782+ if (real == curgid)
53783+ realok = 1;
53784+ if (effective == curgid)
53785+ effectiveok = 1;
53786+ if (fs == curgid)
53787+ fsok = 1;
53788+ }
53789+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
53790+ for (i = 0; i < num; i++) {
53791+ curgid = (int)gidlist[i];
53792+ if (real == curgid)
53793+ break;
53794+ if (effective == curgid)
53795+ break;
53796+ if (fs == curgid)
53797+ break;
53798+ }
53799+ /* not in deny list */
53800+ if (i == num) {
53801+ realok = 1;
53802+ effectiveok = 1;
53803+ fsok = 1;
53804+ }
53805+ }
53806+
53807+ if (realok && effectiveok && fsok)
53808+ return 0;
53809+ else {
53810+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53811+ return 1;
53812+ }
53813+}
53814+
53815+extern int gr_acl_is_capable(const int cap);
53816+
53817+void
53818+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53819+{
53820+ struct acl_role_label *role = task->role;
53821+ struct acl_subject_label *subj = NULL;
53822+ struct acl_object_label *obj;
53823+ struct file *filp;
53824+
53825+ if (unlikely(!(gr_status & GR_READY)))
53826+ return;
53827+
53828+ filp = task->exec_file;
53829+
53830+ /* kernel process, we'll give them the kernel role */
53831+ if (unlikely(!filp)) {
53832+ task->role = kernel_role;
53833+ task->acl = kernel_role->root_label;
53834+ return;
53835+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53836+ role = lookup_acl_role_label(task, uid, gid);
53837+
53838+ /* don't change the role if we're not a privileged process */
53839+ if (role && task->role != role &&
53840+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
53841+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
53842+ return;
53843+
53844+ /* perform subject lookup in possibly new role
53845+ we can use this result below in the case where role == task->role
53846+ */
53847+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53848+
53849+ /* if we changed uid/gid, but result in the same role
53850+ and are using inheritance, don't lose the inherited subject
53851+ if current subject is other than what normal lookup
53852+ would result in, we arrived via inheritance, don't
53853+ lose subject
53854+ */
53855+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53856+ (subj == task->acl)))
53857+ task->acl = subj;
53858+
53859+ task->role = role;
53860+
53861+ task->is_writable = 0;
53862+
53863+ /* ignore additional mmap checks for processes that are writable
53864+ by the default ACL */
53865+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53866+ if (unlikely(obj->mode & GR_WRITE))
53867+ task->is_writable = 1;
53868+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53869+ if (unlikely(obj->mode & GR_WRITE))
53870+ task->is_writable = 1;
53871+
53872+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53873+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53874+#endif
53875+
53876+ gr_set_proc_res(task);
53877+
53878+ return;
53879+}
53880+
53881+int
53882+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53883+ const int unsafe_flags)
53884+{
53885+ struct task_struct *task = current;
53886+ struct acl_subject_label *newacl;
53887+ struct acl_object_label *obj;
53888+ __u32 retmode;
53889+
53890+ if (unlikely(!(gr_status & GR_READY)))
53891+ return 0;
53892+
53893+ newacl = chk_subj_label(dentry, mnt, task->role);
53894+
53895+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
53896+ did an exec
53897+ */
53898+ rcu_read_lock();
53899+ read_lock(&tasklist_lock);
53900+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
53901+ (task->parent->acl->mode & GR_POVERRIDE))) {
53902+ read_unlock(&tasklist_lock);
53903+ rcu_read_unlock();
53904+ goto skip_check;
53905+ }
53906+ read_unlock(&tasklist_lock);
53907+ rcu_read_unlock();
53908+
53909+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53910+ !(task->role->roletype & GR_ROLE_GOD) &&
53911+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
53912+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53913+ if (unsafe_flags & LSM_UNSAFE_SHARE)
53914+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53915+ else
53916+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53917+ return -EACCES;
53918+ }
53919+
53920+skip_check:
53921+
53922+ obj = chk_obj_label(dentry, mnt, task->acl);
53923+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53924+
53925+ if (!(task->acl->mode & GR_INHERITLEARN) &&
53926+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53927+ if (obj->nested)
53928+ task->acl = obj->nested;
53929+ else
53930+ task->acl = newacl;
53931+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
53932+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
53933+
53934+ task->is_writable = 0;
53935+
53936+ /* ignore additional mmap checks for processes that are writable
53937+ by the default ACL */
53938+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
53939+ if (unlikely(obj->mode & GR_WRITE))
53940+ task->is_writable = 1;
53941+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
53942+ if (unlikely(obj->mode & GR_WRITE))
53943+ task->is_writable = 1;
53944+
53945+ gr_set_proc_res(task);
53946+
53947+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53948+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53949+#endif
53950+ return 0;
53951+}
53952+
53953+/* always called with valid inodev ptr */
53954+static void
53955+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
53956+{
53957+ struct acl_object_label *matchpo;
53958+ struct acl_subject_label *matchps;
53959+ struct acl_subject_label *subj;
53960+ struct acl_role_label *role;
53961+ unsigned int x;
53962+
53963+ FOR_EACH_ROLE_START(role)
53964+ FOR_EACH_SUBJECT_START(role, subj, x)
53965+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
53966+ matchpo->mode |= GR_DELETED;
53967+ FOR_EACH_SUBJECT_END(subj,x)
53968+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
53969+ if (subj->inode == ino && subj->device == dev)
53970+ subj->mode |= GR_DELETED;
53971+ FOR_EACH_NESTED_SUBJECT_END(subj)
53972+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
53973+ matchps->mode |= GR_DELETED;
53974+ FOR_EACH_ROLE_END(role)
53975+
53976+ inodev->nentry->deleted = 1;
53977+
53978+ return;
53979+}
53980+
53981+void
53982+gr_handle_delete(const ino_t ino, const dev_t dev)
53983+{
53984+ struct inodev_entry *inodev;
53985+
53986+ if (unlikely(!(gr_status & GR_READY)))
53987+ return;
53988+
53989+ write_lock(&gr_inode_lock);
53990+ inodev = lookup_inodev_entry(ino, dev);
53991+ if (inodev != NULL)
53992+ do_handle_delete(inodev, ino, dev);
53993+ write_unlock(&gr_inode_lock);
53994+
53995+ return;
53996+}
53997+
53998+static void
53999+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
54000+ const ino_t newinode, const dev_t newdevice,
54001+ struct acl_subject_label *subj)
54002+{
54003+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
54004+ struct acl_object_label *match;
54005+
54006+ match = subj->obj_hash[index];
54007+
54008+ while (match && (match->inode != oldinode ||
54009+ match->device != olddevice ||
54010+ !(match->mode & GR_DELETED)))
54011+ match = match->next;
54012+
54013+ if (match && (match->inode == oldinode)
54014+ && (match->device == olddevice)
54015+ && (match->mode & GR_DELETED)) {
54016+ if (match->prev == NULL) {
54017+ subj->obj_hash[index] = match->next;
54018+ if (match->next != NULL)
54019+ match->next->prev = NULL;
54020+ } else {
54021+ match->prev->next = match->next;
54022+ if (match->next != NULL)
54023+ match->next->prev = match->prev;
54024+ }
54025+ match->prev = NULL;
54026+ match->next = NULL;
54027+ match->inode = newinode;
54028+ match->device = newdevice;
54029+ match->mode &= ~GR_DELETED;
54030+
54031+ insert_acl_obj_label(match, subj);
54032+ }
54033+
54034+ return;
54035+}
54036+
54037+static void
54038+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
54039+ const ino_t newinode, const dev_t newdevice,
54040+ struct acl_role_label *role)
54041+{
54042+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
54043+ struct acl_subject_label *match;
54044+
54045+ match = role->subj_hash[index];
54046+
54047+ while (match && (match->inode != oldinode ||
54048+ match->device != olddevice ||
54049+ !(match->mode & GR_DELETED)))
54050+ match = match->next;
54051+
54052+ if (match && (match->inode == oldinode)
54053+ && (match->device == olddevice)
54054+ && (match->mode & GR_DELETED)) {
54055+ if (match->prev == NULL) {
54056+ role->subj_hash[index] = match->next;
54057+ if (match->next != NULL)
54058+ match->next->prev = NULL;
54059+ } else {
54060+ match->prev->next = match->next;
54061+ if (match->next != NULL)
54062+ match->next->prev = match->prev;
54063+ }
54064+ match->prev = NULL;
54065+ match->next = NULL;
54066+ match->inode = newinode;
54067+ match->device = newdevice;
54068+ match->mode &= ~GR_DELETED;
54069+
54070+ insert_acl_subj_label(match, role);
54071+ }
54072+
54073+ return;
54074+}
54075+
54076+static void
54077+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
54078+ const ino_t newinode, const dev_t newdevice)
54079+{
54080+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
54081+ struct inodev_entry *match;
54082+
54083+ match = inodev_set.i_hash[index];
54084+
54085+ while (match && (match->nentry->inode != oldinode ||
54086+ match->nentry->device != olddevice || !match->nentry->deleted))
54087+ match = match->next;
54088+
54089+ if (match && (match->nentry->inode == oldinode)
54090+ && (match->nentry->device == olddevice) &&
54091+ match->nentry->deleted) {
54092+ if (match->prev == NULL) {
54093+ inodev_set.i_hash[index] = match->next;
54094+ if (match->next != NULL)
54095+ match->next->prev = NULL;
54096+ } else {
54097+ match->prev->next = match->next;
54098+ if (match->next != NULL)
54099+ match->next->prev = match->prev;
54100+ }
54101+ match->prev = NULL;
54102+ match->next = NULL;
54103+ match->nentry->inode = newinode;
54104+ match->nentry->device = newdevice;
54105+ match->nentry->deleted = 0;
54106+
54107+ insert_inodev_entry(match);
54108+ }
54109+
54110+ return;
54111+}
54112+
54113+static void
54114+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
54115+{
54116+ struct acl_subject_label *subj;
54117+ struct acl_role_label *role;
54118+ unsigned int x;
54119+
54120+ FOR_EACH_ROLE_START(role)
54121+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
54122+
54123+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
54124+ if ((subj->inode == ino) && (subj->device == dev)) {
54125+ subj->inode = ino;
54126+ subj->device = dev;
54127+ }
54128+ FOR_EACH_NESTED_SUBJECT_END(subj)
54129+ FOR_EACH_SUBJECT_START(role, subj, x)
54130+ update_acl_obj_label(matchn->inode, matchn->device,
54131+ ino, dev, subj);
54132+ FOR_EACH_SUBJECT_END(subj,x)
54133+ FOR_EACH_ROLE_END(role)
54134+
54135+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
54136+
54137+ return;
54138+}
54139+
54140+static void
54141+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
54142+ const struct vfsmount *mnt)
54143+{
54144+ ino_t ino = dentry->d_inode->i_ino;
54145+ dev_t dev = __get_dev(dentry);
54146+
54147+ __do_handle_create(matchn, ino, dev);
54148+
54149+ return;
54150+}
54151+
54152+void
54153+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
54154+{
54155+ struct name_entry *matchn;
54156+
54157+ if (unlikely(!(gr_status & GR_READY)))
54158+ return;
54159+
54160+ preempt_disable();
54161+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
54162+
54163+ if (unlikely((unsigned long)matchn)) {
54164+ write_lock(&gr_inode_lock);
54165+ do_handle_create(matchn, dentry, mnt);
54166+ write_unlock(&gr_inode_lock);
54167+ }
54168+ preempt_enable();
54169+
54170+ return;
54171+}
54172+
54173+void
54174+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
54175+{
54176+ struct name_entry *matchn;
54177+
54178+ if (unlikely(!(gr_status & GR_READY)))
54179+ return;
54180+
54181+ preempt_disable();
54182+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
54183+
54184+ if (unlikely((unsigned long)matchn)) {
54185+ write_lock(&gr_inode_lock);
54186+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
54187+ write_unlock(&gr_inode_lock);
54188+ }
54189+ preempt_enable();
54190+
54191+ return;
54192+}
54193+
54194+void
54195+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
54196+ struct dentry *old_dentry,
54197+ struct dentry *new_dentry,
54198+ struct vfsmount *mnt, const __u8 replace)
54199+{
54200+ struct name_entry *matchn;
54201+ struct inodev_entry *inodev;
54202+ struct inode *inode = new_dentry->d_inode;
54203+ ino_t old_ino = old_dentry->d_inode->i_ino;
54204+ dev_t old_dev = __get_dev(old_dentry);
54205+
54206+ /* vfs_rename swaps the name and parent link for old_dentry and
54207+ new_dentry
54208+ at this point, old_dentry has the new name, parent link, and inode
54209+ for the renamed file
54210+ if a file is being replaced by a rename, new_dentry has the inode
54211+ and name for the replaced file
54212+ */
54213+
54214+ if (unlikely(!(gr_status & GR_READY)))
54215+ return;
54216+
54217+ preempt_disable();
54218+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
54219+
54220+ /* we wouldn't have to check d_inode if it weren't for
54221+ NFS silly-renaming
54222+ */
54223+
54224+ write_lock(&gr_inode_lock);
54225+ if (unlikely(replace && inode)) {
54226+ ino_t new_ino = inode->i_ino;
54227+ dev_t new_dev = __get_dev(new_dentry);
54228+
54229+ inodev = lookup_inodev_entry(new_ino, new_dev);
54230+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
54231+ do_handle_delete(inodev, new_ino, new_dev);
54232+ }
54233+
54234+ inodev = lookup_inodev_entry(old_ino, old_dev);
54235+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
54236+ do_handle_delete(inodev, old_ino, old_dev);
54237+
54238+ if (unlikely((unsigned long)matchn))
54239+ do_handle_create(matchn, old_dentry, mnt);
54240+
54241+ write_unlock(&gr_inode_lock);
54242+ preempt_enable();
54243+
54244+ return;
54245+}
54246+
54247+static int
54248+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
54249+ unsigned char **sum)
54250+{
54251+ struct acl_role_label *r;
54252+ struct role_allowed_ip *ipp;
54253+ struct role_transition *trans;
54254+ unsigned int i;
54255+ int found = 0;
54256+ u32 curr_ip = current->signal->curr_ip;
54257+
54258+ current->signal->saved_ip = curr_ip;
54259+
54260+ /* check transition table */
54261+
54262+ for (trans = current->role->transitions; trans; trans = trans->next) {
54263+ if (!strcmp(rolename, trans->rolename)) {
54264+ found = 1;
54265+ break;
54266+ }
54267+ }
54268+
54269+ if (!found)
54270+ return 0;
54271+
54272+ /* handle special roles that do not require authentication
54273+ and check ip */
54274+
54275+ FOR_EACH_ROLE_START(r)
54276+ if (!strcmp(rolename, r->rolename) &&
54277+ (r->roletype & GR_ROLE_SPECIAL)) {
54278+ found = 0;
54279+ if (r->allowed_ips != NULL) {
54280+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
54281+ if ((ntohl(curr_ip) & ipp->netmask) ==
54282+ (ntohl(ipp->addr) & ipp->netmask))
54283+ found = 1;
54284+ }
54285+ } else
54286+ found = 2;
54287+ if (!found)
54288+ return 0;
54289+
54290+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
54291+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
54292+ *salt = NULL;
54293+ *sum = NULL;
54294+ return 1;
54295+ }
54296+ }
54297+ FOR_EACH_ROLE_END(r)
54298+
54299+ for (i = 0; i < num_sprole_pws; i++) {
54300+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
54301+ *salt = acl_special_roles[i]->salt;
54302+ *sum = acl_special_roles[i]->sum;
54303+ return 1;
54304+ }
54305+ }
54306+
54307+ return 0;
54308+}
54309+
54310+static void
54311+assign_special_role(char *rolename)
54312+{
54313+ struct acl_object_label *obj;
54314+ struct acl_role_label *r;
54315+ struct acl_role_label *assigned = NULL;
54316+ struct task_struct *tsk;
54317+ struct file *filp;
54318+
54319+ FOR_EACH_ROLE_START(r)
54320+ if (!strcmp(rolename, r->rolename) &&
54321+ (r->roletype & GR_ROLE_SPECIAL)) {
54322+ assigned = r;
54323+ break;
54324+ }
54325+ FOR_EACH_ROLE_END(r)
54326+
54327+ if (!assigned)
54328+ return;
54329+
54330+ read_lock(&tasklist_lock);
54331+ read_lock(&grsec_exec_file_lock);
54332+
54333+ tsk = current->real_parent;
54334+ if (tsk == NULL)
54335+ goto out_unlock;
54336+
54337+ filp = tsk->exec_file;
54338+ if (filp == NULL)
54339+ goto out_unlock;
54340+
54341+ tsk->is_writable = 0;
54342+
54343+ tsk->acl_sp_role = 1;
54344+ tsk->acl_role_id = ++acl_sp_role_value;
54345+ tsk->role = assigned;
54346+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
54347+
54348+ /* ignore additional mmap checks for processes that are writable
54349+ by the default ACL */
54350+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54351+ if (unlikely(obj->mode & GR_WRITE))
54352+ tsk->is_writable = 1;
54353+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
54354+ if (unlikely(obj->mode & GR_WRITE))
54355+ tsk->is_writable = 1;
54356+
54357+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54358+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
54359+#endif
54360+
54361+out_unlock:
54362+ read_unlock(&grsec_exec_file_lock);
54363+ read_unlock(&tasklist_lock);
54364+ return;
54365+}
54366+
54367+int gr_check_secure_terminal(struct task_struct *task)
54368+{
54369+ struct task_struct *p, *p2, *p3;
54370+ struct files_struct *files;
54371+ struct fdtable *fdt;
54372+ struct file *our_file = NULL, *file;
54373+ int i;
54374+
54375+ if (task->signal->tty == NULL)
54376+ return 1;
54377+
54378+ files = get_files_struct(task);
54379+ if (files != NULL) {
54380+ rcu_read_lock();
54381+ fdt = files_fdtable(files);
54382+ for (i=0; i < fdt->max_fds; i++) {
54383+ file = fcheck_files(files, i);
54384+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
54385+ get_file(file);
54386+ our_file = file;
54387+ }
54388+ }
54389+ rcu_read_unlock();
54390+ put_files_struct(files);
54391+ }
54392+
54393+ if (our_file == NULL)
54394+ return 1;
54395+
54396+ read_lock(&tasklist_lock);
54397+ do_each_thread(p2, p) {
54398+ files = get_files_struct(p);
54399+ if (files == NULL ||
54400+ (p->signal && p->signal->tty == task->signal->tty)) {
54401+ if (files != NULL)
54402+ put_files_struct(files);
54403+ continue;
54404+ }
54405+ rcu_read_lock();
54406+ fdt = files_fdtable(files);
54407+ for (i=0; i < fdt->max_fds; i++) {
54408+ file = fcheck_files(files, i);
54409+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
54410+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
54411+ p3 = task;
54412+ while (p3->pid > 0) {
54413+ if (p3 == p)
54414+ break;
54415+ p3 = p3->real_parent;
54416+ }
54417+ if (p3 == p)
54418+ break;
54419+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
54420+ gr_handle_alertkill(p);
54421+ rcu_read_unlock();
54422+ put_files_struct(files);
54423+ read_unlock(&tasklist_lock);
54424+ fput(our_file);
54425+ return 0;
54426+ }
54427+ }
54428+ rcu_read_unlock();
54429+ put_files_struct(files);
54430+ } while_each_thread(p2, p);
54431+ read_unlock(&tasklist_lock);
54432+
54433+ fput(our_file);
54434+ return 1;
54435+}
54436+
54437+static int gr_rbac_disable(void *unused)
54438+{
54439+ pax_open_kernel();
54440+ gr_status &= ~GR_READY;
54441+ pax_close_kernel();
54442+
54443+ return 0;
54444+}
54445+
54446+ssize_t
54447+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
54448+{
54449+ struct gr_arg_wrapper uwrap;
54450+ unsigned char *sprole_salt = NULL;
54451+ unsigned char *sprole_sum = NULL;
54452+ int error = sizeof (struct gr_arg_wrapper);
54453+ int error2 = 0;
54454+
54455+ mutex_lock(&gr_dev_mutex);
54456+
54457+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
54458+ error = -EPERM;
54459+ goto out;
54460+ }
54461+
54462+ if (count != sizeof (struct gr_arg_wrapper)) {
54463+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
54464+ error = -EINVAL;
54465+ goto out;
54466+ }
54467+
54468+
54469+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
54470+ gr_auth_expires = 0;
54471+ gr_auth_attempts = 0;
54472+ }
54473+
54474+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
54475+ error = -EFAULT;
54476+ goto out;
54477+ }
54478+
54479+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
54480+ error = -EINVAL;
54481+ goto out;
54482+ }
54483+
54484+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
54485+ error = -EFAULT;
54486+ goto out;
54487+ }
54488+
54489+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54490+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54491+ time_after(gr_auth_expires, get_seconds())) {
54492+ error = -EBUSY;
54493+ goto out;
54494+ }
54495+
54496+ /* if non-root trying to do anything other than use a special role,
54497+ do not attempt authentication, do not count towards authentication
54498+ locking
54499+ */
54500+
54501+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
54502+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54503+ !uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
54504+ error = -EPERM;
54505+ goto out;
54506+ }
54507+
54508+ /* ensure pw and special role name are null terminated */
54509+
54510+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
54511+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
54512+
54513+ /* Okay.
54514+ * We have our enough of the argument structure..(we have yet
54515+ * to copy_from_user the tables themselves) . Copy the tables
54516+ * only if we need them, i.e. for loading operations. */
54517+
54518+ switch (gr_usermode->mode) {
54519+ case GR_STATUS:
54520+ if (gr_status & GR_READY) {
54521+ error = 1;
54522+ if (!gr_check_secure_terminal(current))
54523+ error = 3;
54524+ } else
54525+ error = 2;
54526+ goto out;
54527+ case GR_SHUTDOWN:
54528+ if ((gr_status & GR_READY)
54529+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54530+ stop_machine(gr_rbac_disable, NULL, NULL);
54531+ free_variables();
54532+ memset(gr_usermode, 0, sizeof (struct gr_arg));
54533+ memset(gr_system_salt, 0, GR_SALT_LEN);
54534+ memset(gr_system_sum, 0, GR_SHA_LEN);
54535+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
54536+ } else if (gr_status & GR_READY) {
54537+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
54538+ error = -EPERM;
54539+ } else {
54540+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
54541+ error = -EAGAIN;
54542+ }
54543+ break;
54544+ case GR_ENABLE:
54545+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
54546+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
54547+ else {
54548+ if (gr_status & GR_READY)
54549+ error = -EAGAIN;
54550+ else
54551+ error = error2;
54552+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
54553+ }
54554+ break;
54555+ case GR_RELOAD:
54556+ if (!(gr_status & GR_READY)) {
54557+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
54558+ error = -EAGAIN;
54559+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54560+ stop_machine(gr_rbac_disable, NULL, NULL);
54561+ free_variables();
54562+ error2 = gracl_init(gr_usermode);
54563+ if (!error2)
54564+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
54565+ else {
54566+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54567+ error = error2;
54568+ }
54569+ } else {
54570+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54571+ error = -EPERM;
54572+ }
54573+ break;
54574+ case GR_SEGVMOD:
54575+ if (unlikely(!(gr_status & GR_READY))) {
54576+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
54577+ error = -EAGAIN;
54578+ break;
54579+ }
54580+
54581+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54582+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
54583+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
54584+ struct acl_subject_label *segvacl;
54585+ segvacl =
54586+ lookup_acl_subj_label(gr_usermode->segv_inode,
54587+ gr_usermode->segv_device,
54588+ current->role);
54589+ if (segvacl) {
54590+ segvacl->crashes = 0;
54591+ segvacl->expires = 0;
54592+ }
54593+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
54594+ gr_remove_uid(gr_usermode->segv_uid);
54595+ }
54596+ } else {
54597+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
54598+ error = -EPERM;
54599+ }
54600+ break;
54601+ case GR_SPROLE:
54602+ case GR_SPROLEPAM:
54603+ if (unlikely(!(gr_status & GR_READY))) {
54604+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
54605+ error = -EAGAIN;
54606+ break;
54607+ }
54608+
54609+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
54610+ current->role->expires = 0;
54611+ current->role->auth_attempts = 0;
54612+ }
54613+
54614+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54615+ time_after(current->role->expires, get_seconds())) {
54616+ error = -EBUSY;
54617+ goto out;
54618+ }
54619+
54620+ if (lookup_special_role_auth
54621+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
54622+ && ((!sprole_salt && !sprole_sum)
54623+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
54624+ char *p = "";
54625+ assign_special_role(gr_usermode->sp_role);
54626+ read_lock(&tasklist_lock);
54627+ if (current->real_parent)
54628+ p = current->real_parent->role->rolename;
54629+ read_unlock(&tasklist_lock);
54630+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
54631+ p, acl_sp_role_value);
54632+ } else {
54633+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
54634+ error = -EPERM;
54635+ if(!(current->role->auth_attempts++))
54636+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54637+
54638+ goto out;
54639+ }
54640+ break;
54641+ case GR_UNSPROLE:
54642+ if (unlikely(!(gr_status & GR_READY))) {
54643+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
54644+ error = -EAGAIN;
54645+ break;
54646+ }
54647+
54648+ if (current->role->roletype & GR_ROLE_SPECIAL) {
54649+ char *p = "";
54650+ int i = 0;
54651+
54652+ read_lock(&tasklist_lock);
54653+ if (current->real_parent) {
54654+ p = current->real_parent->role->rolename;
54655+ i = current->real_parent->acl_role_id;
54656+ }
54657+ read_unlock(&tasklist_lock);
54658+
54659+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
54660+ gr_set_acls(1);
54661+ } else {
54662+ error = -EPERM;
54663+ goto out;
54664+ }
54665+ break;
54666+ default:
54667+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
54668+ error = -EINVAL;
54669+ break;
54670+ }
54671+
54672+ if (error != -EPERM)
54673+ goto out;
54674+
54675+ if(!(gr_auth_attempts++))
54676+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54677+
54678+ out:
54679+ mutex_unlock(&gr_dev_mutex);
54680+ return error;
54681+}
54682+
54683+/* must be called with
54684+ rcu_read_lock();
54685+ read_lock(&tasklist_lock);
54686+ read_lock(&grsec_exec_file_lock);
54687+*/
54688+int gr_apply_subject_to_task(struct task_struct *task)
54689+{
54690+ struct acl_object_label *obj;
54691+ char *tmpname;
54692+ struct acl_subject_label *tmpsubj;
54693+ struct file *filp;
54694+ struct name_entry *nmatch;
54695+
54696+ filp = task->exec_file;
54697+ if (filp == NULL)
54698+ return 0;
54699+
54700+ /* the following is to apply the correct subject
54701+ on binaries running when the RBAC system
54702+ is enabled, when the binaries have been
54703+ replaced or deleted since their execution
54704+ -----
54705+ when the RBAC system starts, the inode/dev
54706+ from exec_file will be one the RBAC system
54707+ is unaware of. It only knows the inode/dev
54708+ of the present file on disk, or the absence
54709+ of it.
54710+ */
54711+ preempt_disable();
54712+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
54713+
54714+ nmatch = lookup_name_entry(tmpname);
54715+ preempt_enable();
54716+ tmpsubj = NULL;
54717+ if (nmatch) {
54718+ if (nmatch->deleted)
54719+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
54720+ else
54721+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
54722+ if (tmpsubj != NULL)
54723+ task->acl = tmpsubj;
54724+ }
54725+ if (tmpsubj == NULL)
54726+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
54727+ task->role);
54728+ if (task->acl) {
54729+ task->is_writable = 0;
54730+ /* ignore additional mmap checks for processes that are writable
54731+ by the default ACL */
54732+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54733+ if (unlikely(obj->mode & GR_WRITE))
54734+ task->is_writable = 1;
54735+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54736+ if (unlikely(obj->mode & GR_WRITE))
54737+ task->is_writable = 1;
54738+
54739+ gr_set_proc_res(task);
54740+
54741+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54742+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54743+#endif
54744+ } else {
54745+ return 1;
54746+ }
54747+
54748+ return 0;
54749+}
54750+
54751+int
54752+gr_set_acls(const int type)
54753+{
54754+ struct task_struct *task, *task2;
54755+ struct acl_role_label *role = current->role;
54756+ __u16 acl_role_id = current->acl_role_id;
54757+ const struct cred *cred;
54758+ int ret;
54759+
54760+ rcu_read_lock();
54761+ read_lock(&tasklist_lock);
54762+ read_lock(&grsec_exec_file_lock);
54763+ do_each_thread(task2, task) {
54764+ /* check to see if we're called from the exit handler,
54765+ if so, only replace ACLs that have inherited the admin
54766+ ACL */
54767+
54768+ if (type && (task->role != role ||
54769+ task->acl_role_id != acl_role_id))
54770+ continue;
54771+
54772+ task->acl_role_id = 0;
54773+ task->acl_sp_role = 0;
54774+
54775+ if (task->exec_file) {
54776+ cred = __task_cred(task);
54777+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
54778+ ret = gr_apply_subject_to_task(task);
54779+ if (ret) {
54780+ read_unlock(&grsec_exec_file_lock);
54781+ read_unlock(&tasklist_lock);
54782+ rcu_read_unlock();
54783+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
54784+ return ret;
54785+ }
54786+ } else {
54787+ // it's a kernel process
54788+ task->role = kernel_role;
54789+ task->acl = kernel_role->root_label;
54790+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
54791+ task->acl->mode &= ~GR_PROCFIND;
54792+#endif
54793+ }
54794+ } while_each_thread(task2, task);
54795+ read_unlock(&grsec_exec_file_lock);
54796+ read_unlock(&tasklist_lock);
54797+ rcu_read_unlock();
54798+
54799+ return 0;
54800+}
54801+
54802+void
54803+gr_learn_resource(const struct task_struct *task,
54804+ const int res, const unsigned long wanted, const int gt)
54805+{
54806+ struct acl_subject_label *acl;
54807+ const struct cred *cred;
54808+
54809+ if (unlikely((gr_status & GR_READY) &&
54810+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54811+ goto skip_reslog;
54812+
54813+#ifdef CONFIG_GRKERNSEC_RESLOG
54814+ gr_log_resource(task, res, wanted, gt);
54815+#endif
54816+ skip_reslog:
54817+
54818+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54819+ return;
54820+
54821+ acl = task->acl;
54822+
54823+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54824+ !(acl->resmask & (1 << (unsigned short) res))))
54825+ return;
54826+
54827+ if (wanted >= acl->res[res].rlim_cur) {
54828+ unsigned long res_add;
54829+
54830+ res_add = wanted;
54831+ switch (res) {
54832+ case RLIMIT_CPU:
54833+ res_add += GR_RLIM_CPU_BUMP;
54834+ break;
54835+ case RLIMIT_FSIZE:
54836+ res_add += GR_RLIM_FSIZE_BUMP;
54837+ break;
54838+ case RLIMIT_DATA:
54839+ res_add += GR_RLIM_DATA_BUMP;
54840+ break;
54841+ case RLIMIT_STACK:
54842+ res_add += GR_RLIM_STACK_BUMP;
54843+ break;
54844+ case RLIMIT_CORE:
54845+ res_add += GR_RLIM_CORE_BUMP;
54846+ break;
54847+ case RLIMIT_RSS:
54848+ res_add += GR_RLIM_RSS_BUMP;
54849+ break;
54850+ case RLIMIT_NPROC:
54851+ res_add += GR_RLIM_NPROC_BUMP;
54852+ break;
54853+ case RLIMIT_NOFILE:
54854+ res_add += GR_RLIM_NOFILE_BUMP;
54855+ break;
54856+ case RLIMIT_MEMLOCK:
54857+ res_add += GR_RLIM_MEMLOCK_BUMP;
54858+ break;
54859+ case RLIMIT_AS:
54860+ res_add += GR_RLIM_AS_BUMP;
54861+ break;
54862+ case RLIMIT_LOCKS:
54863+ res_add += GR_RLIM_LOCKS_BUMP;
54864+ break;
54865+ case RLIMIT_SIGPENDING:
54866+ res_add += GR_RLIM_SIGPENDING_BUMP;
54867+ break;
54868+ case RLIMIT_MSGQUEUE:
54869+ res_add += GR_RLIM_MSGQUEUE_BUMP;
54870+ break;
54871+ case RLIMIT_NICE:
54872+ res_add += GR_RLIM_NICE_BUMP;
54873+ break;
54874+ case RLIMIT_RTPRIO:
54875+ res_add += GR_RLIM_RTPRIO_BUMP;
54876+ break;
54877+ case RLIMIT_RTTIME:
54878+ res_add += GR_RLIM_RTTIME_BUMP;
54879+ break;
54880+ }
54881+
54882+ acl->res[res].rlim_cur = res_add;
54883+
54884+ if (wanted > acl->res[res].rlim_max)
54885+ acl->res[res].rlim_max = res_add;
54886+
54887+ /* only log the subject filename, since resource logging is supported for
54888+ single-subject learning only */
54889+ rcu_read_lock();
54890+ cred = __task_cred(task);
54891+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54892+ task->role->roletype, cred->uid, cred->gid, acl->filename,
54893+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
54894+ "", (unsigned long) res, &task->signal->saved_ip);
54895+ rcu_read_unlock();
54896+ }
54897+
54898+ return;
54899+}
54900+
54901+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54902+void
54903+pax_set_initial_flags(struct linux_binprm *bprm)
54904+{
54905+ struct task_struct *task = current;
54906+ struct acl_subject_label *proc;
54907+ unsigned long flags;
54908+
54909+ if (unlikely(!(gr_status & GR_READY)))
54910+ return;
54911+
54912+ flags = pax_get_flags(task);
54913+
54914+ proc = task->acl;
54915+
54916+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54917+ flags &= ~MF_PAX_PAGEEXEC;
54918+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54919+ flags &= ~MF_PAX_SEGMEXEC;
54920+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54921+ flags &= ~MF_PAX_RANDMMAP;
54922+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54923+ flags &= ~MF_PAX_EMUTRAMP;
54924+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54925+ flags &= ~MF_PAX_MPROTECT;
54926+
54927+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54928+ flags |= MF_PAX_PAGEEXEC;
54929+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
54930+ flags |= MF_PAX_SEGMEXEC;
54931+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
54932+ flags |= MF_PAX_RANDMMAP;
54933+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54934+ flags |= MF_PAX_EMUTRAMP;
54935+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54936+ flags |= MF_PAX_MPROTECT;
54937+
54938+ pax_set_flags(task, flags);
54939+
54940+ return;
54941+}
54942+#endif
54943+
54944+int
54945+gr_handle_proc_ptrace(struct task_struct *task)
54946+{
54947+ struct file *filp;
54948+ struct task_struct *tmp = task;
54949+ struct task_struct *curtemp = current;
54950+ __u32 retmode;
54951+
54952+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54953+ if (unlikely(!(gr_status & GR_READY)))
54954+ return 0;
54955+#endif
54956+
54957+ read_lock(&tasklist_lock);
54958+ read_lock(&grsec_exec_file_lock);
54959+ filp = task->exec_file;
54960+
54961+ while (tmp->pid > 0) {
54962+ if (tmp == curtemp)
54963+ break;
54964+ tmp = tmp->real_parent;
54965+ }
54966+
54967+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
54968+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
54969+ read_unlock(&grsec_exec_file_lock);
54970+ read_unlock(&tasklist_lock);
54971+ return 1;
54972+ }
54973+
54974+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54975+ if (!(gr_status & GR_READY)) {
54976+ read_unlock(&grsec_exec_file_lock);
54977+ read_unlock(&tasklist_lock);
54978+ return 0;
54979+ }
54980+#endif
54981+
54982+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
54983+ read_unlock(&grsec_exec_file_lock);
54984+ read_unlock(&tasklist_lock);
54985+
54986+ if (retmode & GR_NOPTRACE)
54987+ return 1;
54988+
54989+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
54990+ && (current->acl != task->acl || (current->acl != current->role->root_label
54991+ && current->pid != task->pid)))
54992+ return 1;
54993+
54994+ return 0;
54995+}
54996+
54997+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
54998+{
54999+ if (unlikely(!(gr_status & GR_READY)))
55000+ return;
55001+
55002+ if (!(current->role->roletype & GR_ROLE_GOD))
55003+ return;
55004+
55005+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
55006+ p->role->rolename, gr_task_roletype_to_char(p),
55007+ p->acl->filename);
55008+}
55009+
55010+int
55011+gr_handle_ptrace(struct task_struct *task, const long request)
55012+{
55013+ struct task_struct *tmp = task;
55014+ struct task_struct *curtemp = current;
55015+ __u32 retmode;
55016+
55017+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
55018+ if (unlikely(!(gr_status & GR_READY)))
55019+ return 0;
55020+#endif
55021+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
55022+ read_lock(&tasklist_lock);
55023+ while (tmp->pid > 0) {
55024+ if (tmp == curtemp)
55025+ break;
55026+ tmp = tmp->real_parent;
55027+ }
55028+
55029+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && !uid_eq(current_uid(), GLOBAL_ROOT_UID) && !(gr_status & GR_READY)) ||
55030+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
55031+ read_unlock(&tasklist_lock);
55032+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55033+ return 1;
55034+ }
55035+ read_unlock(&tasklist_lock);
55036+ }
55037+
55038+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55039+ if (!(gr_status & GR_READY))
55040+ return 0;
55041+#endif
55042+
55043+ read_lock(&grsec_exec_file_lock);
55044+ if (unlikely(!task->exec_file)) {
55045+ read_unlock(&grsec_exec_file_lock);
55046+ return 0;
55047+ }
55048+
55049+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
55050+ read_unlock(&grsec_exec_file_lock);
55051+
55052+ if (retmode & GR_NOPTRACE) {
55053+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55054+ return 1;
55055+ }
55056+
55057+ if (retmode & GR_PTRACERD) {
55058+ switch (request) {
55059+ case PTRACE_SEIZE:
55060+ case PTRACE_POKETEXT:
55061+ case PTRACE_POKEDATA:
55062+ case PTRACE_POKEUSR:
55063+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
55064+ case PTRACE_SETREGS:
55065+ case PTRACE_SETFPREGS:
55066+#endif
55067+#ifdef CONFIG_X86
55068+ case PTRACE_SETFPXREGS:
55069+#endif
55070+#ifdef CONFIG_ALTIVEC
55071+ case PTRACE_SETVRREGS:
55072+#endif
55073+ return 1;
55074+ default:
55075+ return 0;
55076+ }
55077+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
55078+ !(current->role->roletype & GR_ROLE_GOD) &&
55079+ (current->acl != task->acl)) {
55080+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
55081+ return 1;
55082+ }
55083+
55084+ return 0;
55085+}
55086+
55087+static int is_writable_mmap(const struct file *filp)
55088+{
55089+ struct task_struct *task = current;
55090+ struct acl_object_label *obj, *obj2;
55091+
55092+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
55093+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
55094+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
55095+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
55096+ task->role->root_label);
55097+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
55098+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
55099+ return 1;
55100+ }
55101+ }
55102+ return 0;
55103+}
55104+
55105+int
55106+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
55107+{
55108+ __u32 mode;
55109+
55110+ if (unlikely(!file || !(prot & PROT_EXEC)))
55111+ return 1;
55112+
55113+ if (is_writable_mmap(file))
55114+ return 0;
55115+
55116+ mode =
55117+ gr_search_file(file->f_path.dentry,
55118+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
55119+ file->f_path.mnt);
55120+
55121+ if (!gr_tpe_allow(file))
55122+ return 0;
55123+
55124+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
55125+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55126+ return 0;
55127+ } else if (unlikely(!(mode & GR_EXEC))) {
55128+ return 0;
55129+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
55130+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55131+ return 1;
55132+ }
55133+
55134+ return 1;
55135+}
55136+
55137+int
55138+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
55139+{
55140+ __u32 mode;
55141+
55142+ if (unlikely(!file || !(prot & PROT_EXEC)))
55143+ return 1;
55144+
55145+ if (is_writable_mmap(file))
55146+ return 0;
55147+
55148+ mode =
55149+ gr_search_file(file->f_path.dentry,
55150+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
55151+ file->f_path.mnt);
55152+
55153+ if (!gr_tpe_allow(file))
55154+ return 0;
55155+
55156+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
55157+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55158+ return 0;
55159+ } else if (unlikely(!(mode & GR_EXEC))) {
55160+ return 0;
55161+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
55162+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55163+ return 1;
55164+ }
55165+
55166+ return 1;
55167+}
55168+
55169+void
55170+gr_acl_handle_psacct(struct task_struct *task, const long code)
55171+{
55172+ unsigned long runtime;
55173+ unsigned long cputime;
55174+ unsigned int wday, cday;
55175+ __u8 whr, chr;
55176+ __u8 wmin, cmin;
55177+ __u8 wsec, csec;
55178+ struct timespec timeval;
55179+
55180+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
55181+ !(task->acl->mode & GR_PROCACCT)))
55182+ return;
55183+
55184+ do_posix_clock_monotonic_gettime(&timeval);
55185+ runtime = timeval.tv_sec - task->start_time.tv_sec;
55186+ wday = runtime / (3600 * 24);
55187+ runtime -= wday * (3600 * 24);
55188+ whr = runtime / 3600;
55189+ runtime -= whr * 3600;
55190+ wmin = runtime / 60;
55191+ runtime -= wmin * 60;
55192+ wsec = runtime;
55193+
55194+ cputime = (task->utime + task->stime) / HZ;
55195+ cday = cputime / (3600 * 24);
55196+ cputime -= cday * (3600 * 24);
55197+ chr = cputime / 3600;
55198+ cputime -= chr * 3600;
55199+ cmin = cputime / 60;
55200+ cputime -= cmin * 60;
55201+ csec = cputime;
55202+
55203+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
55204+
55205+ return;
55206+}
55207+
55208+void gr_set_kernel_label(struct task_struct *task)
55209+{
55210+ if (gr_status & GR_READY) {
55211+ task->role = kernel_role;
55212+ task->acl = kernel_role->root_label;
55213+ }
55214+ return;
55215+}
55216+
55217+#ifdef CONFIG_TASKSTATS
55218+int gr_is_taskstats_denied(int pid)
55219+{
55220+ struct task_struct *task;
55221+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55222+ const struct cred *cred;
55223+#endif
55224+ int ret = 0;
55225+
55226+ /* restrict taskstats viewing to un-chrooted root users
55227+ who have the 'view' subject flag if the RBAC system is enabled
55228+ */
55229+
55230+ rcu_read_lock();
55231+ read_lock(&tasklist_lock);
55232+ task = find_task_by_vpid(pid);
55233+ if (task) {
55234+#ifdef CONFIG_GRKERNSEC_CHROOT
55235+ if (proc_is_chrooted(task))
55236+ ret = -EACCES;
55237+#endif
55238+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55239+ cred = __task_cred(task);
55240+#ifdef CONFIG_GRKERNSEC_PROC_USER
55241+ if (!uid_eq(cred->uid, GLOBAL_ROOT_UID))
55242+ ret = -EACCES;
55243+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55244+ if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
55245+ ret = -EACCES;
55246+#endif
55247+#endif
55248+ if (gr_status & GR_READY) {
55249+ if (!(task->acl->mode & GR_VIEW))
55250+ ret = -EACCES;
55251+ }
55252+ } else
55253+ ret = -ENOENT;
55254+
55255+ read_unlock(&tasklist_lock);
55256+ rcu_read_unlock();
55257+
55258+ return ret;
55259+}
55260+#endif
55261+
55262+/* AUXV entries are filled via a descendant of search_binary_handler
55263+ after we've already applied the subject for the target
55264+*/
55265+int gr_acl_enable_at_secure(void)
55266+{
55267+ if (unlikely(!(gr_status & GR_READY)))
55268+ return 0;
55269+
55270+ if (current->acl->mode & GR_ATSECURE)
55271+ return 1;
55272+
55273+ return 0;
55274+}
55275+
55276+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
55277+{
55278+ struct task_struct *task = current;
55279+ struct dentry *dentry = file->f_path.dentry;
55280+ struct vfsmount *mnt = file->f_path.mnt;
55281+ struct acl_object_label *obj, *tmp;
55282+ struct acl_subject_label *subj;
55283+ unsigned int bufsize;
55284+ int is_not_root;
55285+ char *path;
55286+ dev_t dev = __get_dev(dentry);
55287+
55288+ if (unlikely(!(gr_status & GR_READY)))
55289+ return 1;
55290+
55291+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55292+ return 1;
55293+
55294+ /* ignore Eric Biederman */
55295+ if (IS_PRIVATE(dentry->d_inode))
55296+ return 1;
55297+
55298+ subj = task->acl;
55299+ read_lock(&gr_inode_lock);
55300+ do {
55301+ obj = lookup_acl_obj_label(ino, dev, subj);
55302+ if (obj != NULL) {
55303+ read_unlock(&gr_inode_lock);
55304+ return (obj->mode & GR_FIND) ? 1 : 0;
55305+ }
55306+ } while ((subj = subj->parent_subject));
55307+ read_unlock(&gr_inode_lock);
55308+
55309+ /* this is purely an optimization since we're looking for an object
55310+ for the directory we're doing a readdir on
55311+ if it's possible for any globbed object to match the entry we're
55312+ filling into the directory, then the object we find here will be
55313+ an anchor point with attached globbed objects
55314+ */
55315+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
55316+ if (obj->globbed == NULL)
55317+ return (obj->mode & GR_FIND) ? 1 : 0;
55318+
55319+ is_not_root = ((obj->filename[0] == '/') &&
55320+ (obj->filename[1] == '\0')) ? 0 : 1;
55321+ bufsize = PAGE_SIZE - namelen - is_not_root;
55322+
55323+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
55324+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
55325+ return 1;
55326+
55327+ preempt_disable();
55328+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
55329+ bufsize);
55330+
55331+ bufsize = strlen(path);
55332+
55333+ /* if base is "/", don't append an additional slash */
55334+ if (is_not_root)
55335+ *(path + bufsize) = '/';
55336+ memcpy(path + bufsize + is_not_root, name, namelen);
55337+ *(path + bufsize + namelen + is_not_root) = '\0';
55338+
55339+ tmp = obj->globbed;
55340+ while (tmp) {
55341+ if (!glob_match(tmp->filename, path)) {
55342+ preempt_enable();
55343+ return (tmp->mode & GR_FIND) ? 1 : 0;
55344+ }
55345+ tmp = tmp->next;
55346+ }
55347+ preempt_enable();
55348+ return (obj->mode & GR_FIND) ? 1 : 0;
55349+}
55350+
55351+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
55352+EXPORT_SYMBOL(gr_acl_is_enabled);
55353+#endif
55354+EXPORT_SYMBOL(gr_learn_resource);
55355+EXPORT_SYMBOL(gr_set_kernel_label);
55356+#ifdef CONFIG_SECURITY
55357+EXPORT_SYMBOL(gr_check_user_change);
55358+EXPORT_SYMBOL(gr_check_group_change);
55359+#endif
55360+
55361diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
55362new file mode 100644
55363index 0000000..34fefda
55364--- /dev/null
55365+++ b/grsecurity/gracl_alloc.c
55366@@ -0,0 +1,105 @@
55367+#include <linux/kernel.h>
55368+#include <linux/mm.h>
55369+#include <linux/slab.h>
55370+#include <linux/vmalloc.h>
55371+#include <linux/gracl.h>
55372+#include <linux/grsecurity.h>
55373+
55374+static unsigned long alloc_stack_next = 1;
55375+static unsigned long alloc_stack_size = 1;
55376+static void **alloc_stack;
55377+
55378+static __inline__ int
55379+alloc_pop(void)
55380+{
55381+ if (alloc_stack_next == 1)
55382+ return 0;
55383+
55384+ kfree(alloc_stack[alloc_stack_next - 2]);
55385+
55386+ alloc_stack_next--;
55387+
55388+ return 1;
55389+}
55390+
55391+static __inline__ int
55392+alloc_push(void *buf)
55393+{
55394+ if (alloc_stack_next >= alloc_stack_size)
55395+ return 1;
55396+
55397+ alloc_stack[alloc_stack_next - 1] = buf;
55398+
55399+ alloc_stack_next++;
55400+
55401+ return 0;
55402+}
55403+
55404+void *
55405+acl_alloc(unsigned long len)
55406+{
55407+ void *ret = NULL;
55408+
55409+ if (!len || len > PAGE_SIZE)
55410+ goto out;
55411+
55412+ ret = kmalloc(len, GFP_KERNEL);
55413+
55414+ if (ret) {
55415+ if (alloc_push(ret)) {
55416+ kfree(ret);
55417+ ret = NULL;
55418+ }
55419+ }
55420+
55421+out:
55422+ return ret;
55423+}
55424+
55425+void *
55426+acl_alloc_num(unsigned long num, unsigned long len)
55427+{
55428+ if (!len || (num > (PAGE_SIZE / len)))
55429+ return NULL;
55430+
55431+ return acl_alloc(num * len);
55432+}
55433+
55434+void
55435+acl_free_all(void)
55436+{
55437+ if (gr_acl_is_enabled() || !alloc_stack)
55438+ return;
55439+
55440+ while (alloc_pop()) ;
55441+
55442+ if (alloc_stack) {
55443+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
55444+ kfree(alloc_stack);
55445+ else
55446+ vfree(alloc_stack);
55447+ }
55448+
55449+ alloc_stack = NULL;
55450+ alloc_stack_size = 1;
55451+ alloc_stack_next = 1;
55452+
55453+ return;
55454+}
55455+
55456+int
55457+acl_alloc_stack_init(unsigned long size)
55458+{
55459+ if ((size * sizeof (void *)) <= PAGE_SIZE)
55460+ alloc_stack =
55461+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
55462+ else
55463+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
55464+
55465+ alloc_stack_size = size;
55466+
55467+ if (!alloc_stack)
55468+ return 0;
55469+ else
55470+ return 1;
55471+}
55472diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
55473new file mode 100644
55474index 0000000..6d21049
55475--- /dev/null
55476+++ b/grsecurity/gracl_cap.c
55477@@ -0,0 +1,110 @@
55478+#include <linux/kernel.h>
55479+#include <linux/module.h>
55480+#include <linux/sched.h>
55481+#include <linux/gracl.h>
55482+#include <linux/grsecurity.h>
55483+#include <linux/grinternal.h>
55484+
55485+extern const char *captab_log[];
55486+extern int captab_log_entries;
55487+
55488+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
55489+{
55490+ struct acl_subject_label *curracl;
55491+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55492+ kernel_cap_t cap_audit = __cap_empty_set;
55493+
55494+ if (!gr_acl_is_enabled())
55495+ return 1;
55496+
55497+ curracl = task->acl;
55498+
55499+ cap_drop = curracl->cap_lower;
55500+ cap_mask = curracl->cap_mask;
55501+ cap_audit = curracl->cap_invert_audit;
55502+
55503+ while ((curracl = curracl->parent_subject)) {
55504+ /* if the cap isn't specified in the current computed mask but is specified in the
55505+ current level subject, and is lowered in the current level subject, then add
55506+ it to the set of dropped capabilities
55507+ otherwise, add the current level subject's mask to the current computed mask
55508+ */
55509+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55510+ cap_raise(cap_mask, cap);
55511+ if (cap_raised(curracl->cap_lower, cap))
55512+ cap_raise(cap_drop, cap);
55513+ if (cap_raised(curracl->cap_invert_audit, cap))
55514+ cap_raise(cap_audit, cap);
55515+ }
55516+ }
55517+
55518+ if (!cap_raised(cap_drop, cap)) {
55519+ if (cap_raised(cap_audit, cap))
55520+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
55521+ return 1;
55522+ }
55523+
55524+ curracl = task->acl;
55525+
55526+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
55527+ && cap_raised(cred->cap_effective, cap)) {
55528+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
55529+ task->role->roletype, cred->uid,
55530+ cred->gid, task->exec_file ?
55531+ gr_to_filename(task->exec_file->f_path.dentry,
55532+ task->exec_file->f_path.mnt) : curracl->filename,
55533+ curracl->filename, 0UL,
55534+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
55535+ return 1;
55536+ }
55537+
55538+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
55539+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
55540+
55541+ return 0;
55542+}
55543+
55544+int
55545+gr_acl_is_capable(const int cap)
55546+{
55547+ return gr_task_acl_is_capable(current, current_cred(), cap);
55548+}
55549+
55550+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
55551+{
55552+ struct acl_subject_label *curracl;
55553+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55554+
55555+ if (!gr_acl_is_enabled())
55556+ return 1;
55557+
55558+ curracl = task->acl;
55559+
55560+ cap_drop = curracl->cap_lower;
55561+ cap_mask = curracl->cap_mask;
55562+
55563+ while ((curracl = curracl->parent_subject)) {
55564+ /* if the cap isn't specified in the current computed mask but is specified in the
55565+ current level subject, and is lowered in the current level subject, then add
55566+ it to the set of dropped capabilities
55567+ otherwise, add the current level subject's mask to the current computed mask
55568+ */
55569+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55570+ cap_raise(cap_mask, cap);
55571+ if (cap_raised(curracl->cap_lower, cap))
55572+ cap_raise(cap_drop, cap);
55573+ }
55574+ }
55575+
55576+ if (!cap_raised(cap_drop, cap))
55577+ return 1;
55578+
55579+ return 0;
55580+}
55581+
55582+int
55583+gr_acl_is_capable_nolog(const int cap)
55584+{
55585+ return gr_task_acl_is_capable_nolog(current, cap);
55586+}
55587+
55588diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
55589new file mode 100644
55590index 0000000..d28e241
55591--- /dev/null
55592+++ b/grsecurity/gracl_fs.c
55593@@ -0,0 +1,437 @@
55594+#include <linux/kernel.h>
55595+#include <linux/sched.h>
55596+#include <linux/types.h>
55597+#include <linux/fs.h>
55598+#include <linux/file.h>
55599+#include <linux/stat.h>
55600+#include <linux/grsecurity.h>
55601+#include <linux/grinternal.h>
55602+#include <linux/gracl.h>
55603+
55604+umode_t
55605+gr_acl_umask(void)
55606+{
55607+ if (unlikely(!gr_acl_is_enabled()))
55608+ return 0;
55609+
55610+ return current->role->umask;
55611+}
55612+
55613+__u32
55614+gr_acl_handle_hidden_file(const struct dentry * dentry,
55615+ const struct vfsmount * mnt)
55616+{
55617+ __u32 mode;
55618+
55619+ if (unlikely(!dentry->d_inode))
55620+ return GR_FIND;
55621+
55622+ mode =
55623+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
55624+
55625+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
55626+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55627+ return mode;
55628+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
55629+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55630+ return 0;
55631+ } else if (unlikely(!(mode & GR_FIND)))
55632+ return 0;
55633+
55634+ return GR_FIND;
55635+}
55636+
55637+__u32
55638+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
55639+ int acc_mode)
55640+{
55641+ __u32 reqmode = GR_FIND;
55642+ __u32 mode;
55643+
55644+ if (unlikely(!dentry->d_inode))
55645+ return reqmode;
55646+
55647+ if (acc_mode & MAY_APPEND)
55648+ reqmode |= GR_APPEND;
55649+ else if (acc_mode & MAY_WRITE)
55650+ reqmode |= GR_WRITE;
55651+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
55652+ reqmode |= GR_READ;
55653+
55654+ mode =
55655+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55656+ mnt);
55657+
55658+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55659+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55660+ reqmode & GR_READ ? " reading" : "",
55661+ reqmode & GR_WRITE ? " writing" : reqmode &
55662+ GR_APPEND ? " appending" : "");
55663+ return reqmode;
55664+ } else
55665+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55666+ {
55667+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55668+ reqmode & GR_READ ? " reading" : "",
55669+ reqmode & GR_WRITE ? " writing" : reqmode &
55670+ GR_APPEND ? " appending" : "");
55671+ return 0;
55672+ } else if (unlikely((mode & reqmode) != reqmode))
55673+ return 0;
55674+
55675+ return reqmode;
55676+}
55677+
55678+__u32
55679+gr_acl_handle_creat(const struct dentry * dentry,
55680+ const struct dentry * p_dentry,
55681+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55682+ const int imode)
55683+{
55684+ __u32 reqmode = GR_WRITE | GR_CREATE;
55685+ __u32 mode;
55686+
55687+ if (acc_mode & MAY_APPEND)
55688+ reqmode |= GR_APPEND;
55689+ // if a directory was required or the directory already exists, then
55690+ // don't count this open as a read
55691+ if ((acc_mode & MAY_READ) &&
55692+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
55693+ reqmode |= GR_READ;
55694+ if ((open_flags & O_CREAT) &&
55695+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
55696+ reqmode |= GR_SETID;
55697+
55698+ mode =
55699+ gr_check_create(dentry, p_dentry, p_mnt,
55700+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55701+
55702+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55703+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55704+ reqmode & GR_READ ? " reading" : "",
55705+ reqmode & GR_WRITE ? " writing" : reqmode &
55706+ GR_APPEND ? " appending" : "");
55707+ return reqmode;
55708+ } else
55709+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55710+ {
55711+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55712+ reqmode & GR_READ ? " reading" : "",
55713+ reqmode & GR_WRITE ? " writing" : reqmode &
55714+ GR_APPEND ? " appending" : "");
55715+ return 0;
55716+ } else if (unlikely((mode & reqmode) != reqmode))
55717+ return 0;
55718+
55719+ return reqmode;
55720+}
55721+
55722+__u32
55723+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55724+ const int fmode)
55725+{
55726+ __u32 mode, reqmode = GR_FIND;
55727+
55728+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55729+ reqmode |= GR_EXEC;
55730+ if (fmode & S_IWOTH)
55731+ reqmode |= GR_WRITE;
55732+ if (fmode & S_IROTH)
55733+ reqmode |= GR_READ;
55734+
55735+ mode =
55736+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55737+ mnt);
55738+
55739+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55740+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55741+ reqmode & GR_READ ? " reading" : "",
55742+ reqmode & GR_WRITE ? " writing" : "",
55743+ reqmode & GR_EXEC ? " executing" : "");
55744+ return reqmode;
55745+ } else
55746+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55747+ {
55748+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55749+ reqmode & GR_READ ? " reading" : "",
55750+ reqmode & GR_WRITE ? " writing" : "",
55751+ reqmode & GR_EXEC ? " executing" : "");
55752+ return 0;
55753+ } else if (unlikely((mode & reqmode) != reqmode))
55754+ return 0;
55755+
55756+ return reqmode;
55757+}
55758+
55759+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55760+{
55761+ __u32 mode;
55762+
55763+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55764+
55765+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55766+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
55767+ return mode;
55768+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55769+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
55770+ return 0;
55771+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
55772+ return 0;
55773+
55774+ return (reqmode);
55775+}
55776+
55777+__u32
55778+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
55779+{
55780+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
55781+}
55782+
55783+__u32
55784+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
55785+{
55786+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
55787+}
55788+
55789+__u32
55790+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
55791+{
55792+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
55793+}
55794+
55795+__u32
55796+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
55797+{
55798+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
55799+}
55800+
55801+__u32
55802+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
55803+ umode_t *modeptr)
55804+{
55805+ umode_t mode;
55806+
55807+ *modeptr &= ~gr_acl_umask();
55808+ mode = *modeptr;
55809+
55810+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
55811+ return 1;
55812+
55813+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
55814+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
55815+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55816+ GR_CHMOD_ACL_MSG);
55817+ } else {
55818+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
55819+ }
55820+}
55821+
55822+__u32
55823+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
55824+{
55825+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
55826+}
55827+
55828+__u32
55829+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
55830+{
55831+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
55832+}
55833+
55834+__u32
55835+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
55836+{
55837+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
55838+}
55839+
55840+__u32
55841+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
55842+{
55843+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
55844+ GR_UNIXCONNECT_ACL_MSG);
55845+}
55846+
55847+/* hardlinks require at minimum create and link permission,
55848+ any additional privilege required is based on the
55849+ privilege of the file being linked to
55850+*/
55851+__u32
55852+gr_acl_handle_link(const struct dentry * new_dentry,
55853+ const struct dentry * parent_dentry,
55854+ const struct vfsmount * parent_mnt,
55855+ const struct dentry * old_dentry,
55856+ const struct vfsmount * old_mnt, const char *to)
55857+{
55858+ __u32 mode;
55859+ __u32 needmode = GR_CREATE | GR_LINK;
55860+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
55861+
55862+ mode =
55863+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
55864+ old_mnt);
55865+
55866+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
55867+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55868+ return mode;
55869+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55870+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55871+ return 0;
55872+ } else if (unlikely((mode & needmode) != needmode))
55873+ return 0;
55874+
55875+ return 1;
55876+}
55877+
55878+__u32
55879+gr_acl_handle_symlink(const struct dentry * new_dentry,
55880+ const struct dentry * parent_dentry,
55881+ const struct vfsmount * parent_mnt, const char *from)
55882+{
55883+ __u32 needmode = GR_WRITE | GR_CREATE;
55884+ __u32 mode;
55885+
55886+ mode =
55887+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
55888+ GR_CREATE | GR_AUDIT_CREATE |
55889+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
55890+
55891+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
55892+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55893+ return mode;
55894+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55895+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55896+ return 0;
55897+ } else if (unlikely((mode & needmode) != needmode))
55898+ return 0;
55899+
55900+ return (GR_WRITE | GR_CREATE);
55901+}
55902+
55903+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
55904+{
55905+ __u32 mode;
55906+
55907+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55908+
55909+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55910+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
55911+ return mode;
55912+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55913+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
55914+ return 0;
55915+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
55916+ return 0;
55917+
55918+ return (reqmode);
55919+}
55920+
55921+__u32
55922+gr_acl_handle_mknod(const struct dentry * new_dentry,
55923+ const struct dentry * parent_dentry,
55924+ const struct vfsmount * parent_mnt,
55925+ const int mode)
55926+{
55927+ __u32 reqmode = GR_WRITE | GR_CREATE;
55928+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
55929+ reqmode |= GR_SETID;
55930+
55931+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55932+ reqmode, GR_MKNOD_ACL_MSG);
55933+}
55934+
55935+__u32
55936+gr_acl_handle_mkdir(const struct dentry *new_dentry,
55937+ const struct dentry *parent_dentry,
55938+ const struct vfsmount *parent_mnt)
55939+{
55940+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55941+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
55942+}
55943+
55944+#define RENAME_CHECK_SUCCESS(old, new) \
55945+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
55946+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
55947+
55948+int
55949+gr_acl_handle_rename(struct dentry *new_dentry,
55950+ struct dentry *parent_dentry,
55951+ const struct vfsmount *parent_mnt,
55952+ struct dentry *old_dentry,
55953+ struct inode *old_parent_inode,
55954+ struct vfsmount *old_mnt, const char *newname)
55955+{
55956+ __u32 comp1, comp2;
55957+ int error = 0;
55958+
55959+ if (unlikely(!gr_acl_is_enabled()))
55960+ return 0;
55961+
55962+ if (!new_dentry->d_inode) {
55963+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
55964+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
55965+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
55966+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
55967+ GR_DELETE | GR_AUDIT_DELETE |
55968+ GR_AUDIT_READ | GR_AUDIT_WRITE |
55969+ GR_SUPPRESS, old_mnt);
55970+ } else {
55971+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
55972+ GR_CREATE | GR_DELETE |
55973+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
55974+ GR_AUDIT_READ | GR_AUDIT_WRITE |
55975+ GR_SUPPRESS, parent_mnt);
55976+ comp2 =
55977+ gr_search_file(old_dentry,
55978+ GR_READ | GR_WRITE | GR_AUDIT_READ |
55979+ GR_DELETE | GR_AUDIT_DELETE |
55980+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
55981+ }
55982+
55983+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
55984+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
55985+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55986+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
55987+ && !(comp2 & GR_SUPPRESS)) {
55988+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55989+ error = -EACCES;
55990+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
55991+ error = -EACCES;
55992+
55993+ return error;
55994+}
55995+
55996+void
55997+gr_acl_handle_exit(void)
55998+{
55999+ u16 id;
56000+ char *rolename;
56001+ struct file *exec_file;
56002+
56003+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
56004+ !(current->role->roletype & GR_ROLE_PERSIST))) {
56005+ id = current->acl_role_id;
56006+ rolename = current->role->rolename;
56007+ gr_set_acls(1);
56008+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
56009+ }
56010+
56011+ write_lock(&grsec_exec_file_lock);
56012+ exec_file = current->exec_file;
56013+ current->exec_file = NULL;
56014+ write_unlock(&grsec_exec_file_lock);
56015+
56016+ if (exec_file)
56017+ fput(exec_file);
56018+}
56019+
56020+int
56021+gr_acl_handle_procpidmem(const struct task_struct *task)
56022+{
56023+ if (unlikely(!gr_acl_is_enabled()))
56024+ return 0;
56025+
56026+ if (task != current && task->acl->mode & GR_PROTPROCFD)
56027+ return -EACCES;
56028+
56029+ return 0;
56030+}
56031diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
56032new file mode 100644
56033index 0000000..58800a7
56034--- /dev/null
56035+++ b/grsecurity/gracl_ip.c
56036@@ -0,0 +1,384 @@
56037+#include <linux/kernel.h>
56038+#include <asm/uaccess.h>
56039+#include <asm/errno.h>
56040+#include <net/sock.h>
56041+#include <linux/file.h>
56042+#include <linux/fs.h>
56043+#include <linux/net.h>
56044+#include <linux/in.h>
56045+#include <linux/skbuff.h>
56046+#include <linux/ip.h>
56047+#include <linux/udp.h>
56048+#include <linux/types.h>
56049+#include <linux/sched.h>
56050+#include <linux/netdevice.h>
56051+#include <linux/inetdevice.h>
56052+#include <linux/gracl.h>
56053+#include <linux/grsecurity.h>
56054+#include <linux/grinternal.h>
56055+
56056+#define GR_BIND 0x01
56057+#define GR_CONNECT 0x02
56058+#define GR_INVERT 0x04
56059+#define GR_BINDOVERRIDE 0x08
56060+#define GR_CONNECTOVERRIDE 0x10
56061+#define GR_SOCK_FAMILY 0x20
56062+
56063+static const char * gr_protocols[IPPROTO_MAX] = {
56064+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
56065+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
56066+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
56067+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
56068+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
56069+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
56070+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
56071+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
56072+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
56073+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
56074+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
56075+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
56076+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
56077+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
56078+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
56079+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
56080+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
56081+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
56082+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
56083+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
56084+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
56085+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
56086+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
56087+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
56088+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
56089+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
56090+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
56091+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
56092+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
56093+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
56094+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
56095+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
56096+ };
56097+
56098+static const char * gr_socktypes[SOCK_MAX] = {
56099+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
56100+ "unknown:7", "unknown:8", "unknown:9", "packet"
56101+ };
56102+
56103+static const char * gr_sockfamilies[AF_MAX+1] = {
56104+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
56105+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
56106+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
56107+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
56108+ };
56109+
56110+const char *
56111+gr_proto_to_name(unsigned char proto)
56112+{
56113+ return gr_protocols[proto];
56114+}
56115+
56116+const char *
56117+gr_socktype_to_name(unsigned char type)
56118+{
56119+ return gr_socktypes[type];
56120+}
56121+
56122+const char *
56123+gr_sockfamily_to_name(unsigned char family)
56124+{
56125+ return gr_sockfamilies[family];
56126+}
56127+
56128+int
56129+gr_search_socket(const int domain, const int type, const int protocol)
56130+{
56131+ struct acl_subject_label *curr;
56132+ const struct cred *cred = current_cred();
56133+
56134+ if (unlikely(!gr_acl_is_enabled()))
56135+ goto exit;
56136+
56137+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
56138+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
56139+ goto exit; // let the kernel handle it
56140+
56141+ curr = current->acl;
56142+
56143+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
56144+ /* the family is allowed, if this is PF_INET allow it only if
56145+ the extra sock type/protocol checks pass */
56146+ if (domain == PF_INET)
56147+ goto inet_check;
56148+ goto exit;
56149+ } else {
56150+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56151+ __u32 fakeip = 0;
56152+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56153+ current->role->roletype, cred->uid,
56154+ cred->gid, current->exec_file ?
56155+ gr_to_filename(current->exec_file->f_path.dentry,
56156+ current->exec_file->f_path.mnt) :
56157+ curr->filename, curr->filename,
56158+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
56159+ &current->signal->saved_ip);
56160+ goto exit;
56161+ }
56162+ goto exit_fail;
56163+ }
56164+
56165+inet_check:
56166+ /* the rest of this checking is for IPv4 only */
56167+ if (!curr->ips)
56168+ goto exit;
56169+
56170+ if ((curr->ip_type & (1 << type)) &&
56171+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
56172+ goto exit;
56173+
56174+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56175+ /* we don't place acls on raw sockets , and sometimes
56176+ dgram/ip sockets are opened for ioctl and not
56177+ bind/connect, so we'll fake a bind learn log */
56178+ if (type == SOCK_RAW || type == SOCK_PACKET) {
56179+ __u32 fakeip = 0;
56180+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56181+ current->role->roletype, cred->uid,
56182+ cred->gid, current->exec_file ?
56183+ gr_to_filename(current->exec_file->f_path.dentry,
56184+ current->exec_file->f_path.mnt) :
56185+ curr->filename, curr->filename,
56186+ &fakeip, 0, type,
56187+ protocol, GR_CONNECT, &current->signal->saved_ip);
56188+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
56189+ __u32 fakeip = 0;
56190+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56191+ current->role->roletype, cred->uid,
56192+ cred->gid, current->exec_file ?
56193+ gr_to_filename(current->exec_file->f_path.dentry,
56194+ current->exec_file->f_path.mnt) :
56195+ curr->filename, curr->filename,
56196+ &fakeip, 0, type,
56197+ protocol, GR_BIND, &current->signal->saved_ip);
56198+ }
56199+ /* we'll log when they use connect or bind */
56200+ goto exit;
56201+ }
56202+
56203+exit_fail:
56204+ if (domain == PF_INET)
56205+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
56206+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
56207+ else
56208+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
56209+ gr_socktype_to_name(type), protocol);
56210+
56211+ return 0;
56212+exit:
56213+ return 1;
56214+}
56215+
56216+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
56217+{
56218+ if ((ip->mode & mode) &&
56219+ (ip_port >= ip->low) &&
56220+ (ip_port <= ip->high) &&
56221+ ((ntohl(ip_addr) & our_netmask) ==
56222+ (ntohl(our_addr) & our_netmask))
56223+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
56224+ && (ip->type & (1 << type))) {
56225+ if (ip->mode & GR_INVERT)
56226+ return 2; // specifically denied
56227+ else
56228+ return 1; // allowed
56229+ }
56230+
56231+ return 0; // not specifically allowed, may continue parsing
56232+}
56233+
56234+static int
56235+gr_search_connectbind(const int full_mode, struct sock *sk,
56236+ struct sockaddr_in *addr, const int type)
56237+{
56238+ char iface[IFNAMSIZ] = {0};
56239+ struct acl_subject_label *curr;
56240+ struct acl_ip_label *ip;
56241+ struct inet_sock *isk;
56242+ struct net_device *dev;
56243+ struct in_device *idev;
56244+ unsigned long i;
56245+ int ret;
56246+ int mode = full_mode & (GR_BIND | GR_CONNECT);
56247+ __u32 ip_addr = 0;
56248+ __u32 our_addr;
56249+ __u32 our_netmask;
56250+ char *p;
56251+ __u16 ip_port = 0;
56252+ const struct cred *cred = current_cred();
56253+
56254+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
56255+ return 0;
56256+
56257+ curr = current->acl;
56258+ isk = inet_sk(sk);
56259+
56260+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
56261+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
56262+ addr->sin_addr.s_addr = curr->inaddr_any_override;
56263+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
56264+ struct sockaddr_in saddr;
56265+ int err;
56266+
56267+ saddr.sin_family = AF_INET;
56268+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
56269+ saddr.sin_port = isk->inet_sport;
56270+
56271+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
56272+ if (err)
56273+ return err;
56274+
56275+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
56276+ if (err)
56277+ return err;
56278+ }
56279+
56280+ if (!curr->ips)
56281+ return 0;
56282+
56283+ ip_addr = addr->sin_addr.s_addr;
56284+ ip_port = ntohs(addr->sin_port);
56285+
56286+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56287+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56288+ current->role->roletype, cred->uid,
56289+ cred->gid, current->exec_file ?
56290+ gr_to_filename(current->exec_file->f_path.dentry,
56291+ current->exec_file->f_path.mnt) :
56292+ curr->filename, curr->filename,
56293+ &ip_addr, ip_port, type,
56294+ sk->sk_protocol, mode, &current->signal->saved_ip);
56295+ return 0;
56296+ }
56297+
56298+ for (i = 0; i < curr->ip_num; i++) {
56299+ ip = *(curr->ips + i);
56300+ if (ip->iface != NULL) {
56301+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
56302+ p = strchr(iface, ':');
56303+ if (p != NULL)
56304+ *p = '\0';
56305+ dev = dev_get_by_name(sock_net(sk), iface);
56306+ if (dev == NULL)
56307+ continue;
56308+ idev = in_dev_get(dev);
56309+ if (idev == NULL) {
56310+ dev_put(dev);
56311+ continue;
56312+ }
56313+ rcu_read_lock();
56314+ for_ifa(idev) {
56315+ if (!strcmp(ip->iface, ifa->ifa_label)) {
56316+ our_addr = ifa->ifa_address;
56317+ our_netmask = 0xffffffff;
56318+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56319+ if (ret == 1) {
56320+ rcu_read_unlock();
56321+ in_dev_put(idev);
56322+ dev_put(dev);
56323+ return 0;
56324+ } else if (ret == 2) {
56325+ rcu_read_unlock();
56326+ in_dev_put(idev);
56327+ dev_put(dev);
56328+ goto denied;
56329+ }
56330+ }
56331+ } endfor_ifa(idev);
56332+ rcu_read_unlock();
56333+ in_dev_put(idev);
56334+ dev_put(dev);
56335+ } else {
56336+ our_addr = ip->addr;
56337+ our_netmask = ip->netmask;
56338+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56339+ if (ret == 1)
56340+ return 0;
56341+ else if (ret == 2)
56342+ goto denied;
56343+ }
56344+ }
56345+
56346+denied:
56347+ if (mode == GR_BIND)
56348+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
56349+ else if (mode == GR_CONNECT)
56350+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
56351+
56352+ return -EACCES;
56353+}
56354+
56355+int
56356+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
56357+{
56358+ /* always allow disconnection of dgram sockets with connect */
56359+ if (addr->sin_family == AF_UNSPEC)
56360+ return 0;
56361+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
56362+}
56363+
56364+int
56365+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
56366+{
56367+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
56368+}
56369+
56370+int gr_search_listen(struct socket *sock)
56371+{
56372+ struct sock *sk = sock->sk;
56373+ struct sockaddr_in addr;
56374+
56375+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56376+ addr.sin_port = inet_sk(sk)->inet_sport;
56377+
56378+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56379+}
56380+
56381+int gr_search_accept(struct socket *sock)
56382+{
56383+ struct sock *sk = sock->sk;
56384+ struct sockaddr_in addr;
56385+
56386+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56387+ addr.sin_port = inet_sk(sk)->inet_sport;
56388+
56389+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56390+}
56391+
56392+int
56393+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
56394+{
56395+ if (addr)
56396+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
56397+ else {
56398+ struct sockaddr_in sin;
56399+ const struct inet_sock *inet = inet_sk(sk);
56400+
56401+ sin.sin_addr.s_addr = inet->inet_daddr;
56402+ sin.sin_port = inet->inet_dport;
56403+
56404+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56405+ }
56406+}
56407+
56408+int
56409+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
56410+{
56411+ struct sockaddr_in sin;
56412+
56413+ if (unlikely(skb->len < sizeof (struct udphdr)))
56414+ return 0; // skip this packet
56415+
56416+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
56417+ sin.sin_port = udp_hdr(skb)->source;
56418+
56419+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56420+}
56421diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
56422new file mode 100644
56423index 0000000..25f54ef
56424--- /dev/null
56425+++ b/grsecurity/gracl_learn.c
56426@@ -0,0 +1,207 @@
56427+#include <linux/kernel.h>
56428+#include <linux/mm.h>
56429+#include <linux/sched.h>
56430+#include <linux/poll.h>
56431+#include <linux/string.h>
56432+#include <linux/file.h>
56433+#include <linux/types.h>
56434+#include <linux/vmalloc.h>
56435+#include <linux/grinternal.h>
56436+
56437+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
56438+ size_t count, loff_t *ppos);
56439+extern int gr_acl_is_enabled(void);
56440+
56441+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
56442+static int gr_learn_attached;
56443+
56444+/* use a 512k buffer */
56445+#define LEARN_BUFFER_SIZE (512 * 1024)
56446+
56447+static DEFINE_SPINLOCK(gr_learn_lock);
56448+static DEFINE_MUTEX(gr_learn_user_mutex);
56449+
56450+/* we need to maintain two buffers, so that the kernel context of grlearn
56451+ uses a semaphore around the userspace copying, and the other kernel contexts
56452+ use a spinlock when copying into the buffer, since they cannot sleep
56453+*/
56454+static char *learn_buffer;
56455+static char *learn_buffer_user;
56456+static int learn_buffer_len;
56457+static int learn_buffer_user_len;
56458+
56459+static ssize_t
56460+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
56461+{
56462+ DECLARE_WAITQUEUE(wait, current);
56463+ ssize_t retval = 0;
56464+
56465+ add_wait_queue(&learn_wait, &wait);
56466+ set_current_state(TASK_INTERRUPTIBLE);
56467+ do {
56468+ mutex_lock(&gr_learn_user_mutex);
56469+ spin_lock(&gr_learn_lock);
56470+ if (learn_buffer_len)
56471+ break;
56472+ spin_unlock(&gr_learn_lock);
56473+ mutex_unlock(&gr_learn_user_mutex);
56474+ if (file->f_flags & O_NONBLOCK) {
56475+ retval = -EAGAIN;
56476+ goto out;
56477+ }
56478+ if (signal_pending(current)) {
56479+ retval = -ERESTARTSYS;
56480+ goto out;
56481+ }
56482+
56483+ schedule();
56484+ } while (1);
56485+
56486+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
56487+ learn_buffer_user_len = learn_buffer_len;
56488+ retval = learn_buffer_len;
56489+ learn_buffer_len = 0;
56490+
56491+ spin_unlock(&gr_learn_lock);
56492+
56493+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
56494+ retval = -EFAULT;
56495+
56496+ mutex_unlock(&gr_learn_user_mutex);
56497+out:
56498+ set_current_state(TASK_RUNNING);
56499+ remove_wait_queue(&learn_wait, &wait);
56500+ return retval;
56501+}
56502+
56503+static unsigned int
56504+poll_learn(struct file * file, poll_table * wait)
56505+{
56506+ poll_wait(file, &learn_wait, wait);
56507+
56508+ if (learn_buffer_len)
56509+ return (POLLIN | POLLRDNORM);
56510+
56511+ return 0;
56512+}
56513+
56514+void
56515+gr_clear_learn_entries(void)
56516+{
56517+ char *tmp;
56518+
56519+ mutex_lock(&gr_learn_user_mutex);
56520+ spin_lock(&gr_learn_lock);
56521+ tmp = learn_buffer;
56522+ learn_buffer = NULL;
56523+ spin_unlock(&gr_learn_lock);
56524+ if (tmp)
56525+ vfree(tmp);
56526+ if (learn_buffer_user != NULL) {
56527+ vfree(learn_buffer_user);
56528+ learn_buffer_user = NULL;
56529+ }
56530+ learn_buffer_len = 0;
56531+ mutex_unlock(&gr_learn_user_mutex);
56532+
56533+ return;
56534+}
56535+
56536+void
56537+gr_add_learn_entry(const char *fmt, ...)
56538+{
56539+ va_list args;
56540+ unsigned int len;
56541+
56542+ if (!gr_learn_attached)
56543+ return;
56544+
56545+ spin_lock(&gr_learn_lock);
56546+
56547+ /* leave a gap at the end so we know when it's "full" but don't have to
56548+ compute the exact length of the string we're trying to append
56549+ */
56550+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
56551+ spin_unlock(&gr_learn_lock);
56552+ wake_up_interruptible(&learn_wait);
56553+ return;
56554+ }
56555+ if (learn_buffer == NULL) {
56556+ spin_unlock(&gr_learn_lock);
56557+ return;
56558+ }
56559+
56560+ va_start(args, fmt);
56561+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
56562+ va_end(args);
56563+
56564+ learn_buffer_len += len + 1;
56565+
56566+ spin_unlock(&gr_learn_lock);
56567+ wake_up_interruptible(&learn_wait);
56568+
56569+ return;
56570+}
56571+
56572+static int
56573+open_learn(struct inode *inode, struct file *file)
56574+{
56575+ if (file->f_mode & FMODE_READ && gr_learn_attached)
56576+ return -EBUSY;
56577+ if (file->f_mode & FMODE_READ) {
56578+ int retval = 0;
56579+ mutex_lock(&gr_learn_user_mutex);
56580+ if (learn_buffer == NULL)
56581+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
56582+ if (learn_buffer_user == NULL)
56583+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
56584+ if (learn_buffer == NULL) {
56585+ retval = -ENOMEM;
56586+ goto out_error;
56587+ }
56588+ if (learn_buffer_user == NULL) {
56589+ retval = -ENOMEM;
56590+ goto out_error;
56591+ }
56592+ learn_buffer_len = 0;
56593+ learn_buffer_user_len = 0;
56594+ gr_learn_attached = 1;
56595+out_error:
56596+ mutex_unlock(&gr_learn_user_mutex);
56597+ return retval;
56598+ }
56599+ return 0;
56600+}
56601+
56602+static int
56603+close_learn(struct inode *inode, struct file *file)
56604+{
56605+ if (file->f_mode & FMODE_READ) {
56606+ char *tmp = NULL;
56607+ mutex_lock(&gr_learn_user_mutex);
56608+ spin_lock(&gr_learn_lock);
56609+ tmp = learn_buffer;
56610+ learn_buffer = NULL;
56611+ spin_unlock(&gr_learn_lock);
56612+ if (tmp)
56613+ vfree(tmp);
56614+ if (learn_buffer_user != NULL) {
56615+ vfree(learn_buffer_user);
56616+ learn_buffer_user = NULL;
56617+ }
56618+ learn_buffer_len = 0;
56619+ learn_buffer_user_len = 0;
56620+ gr_learn_attached = 0;
56621+ mutex_unlock(&gr_learn_user_mutex);
56622+ }
56623+
56624+ return 0;
56625+}
56626+
56627+const struct file_operations grsec_fops = {
56628+ .read = read_learn,
56629+ .write = write_grsec_handler,
56630+ .open = open_learn,
56631+ .release = close_learn,
56632+ .poll = poll_learn,
56633+};
56634diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
56635new file mode 100644
56636index 0000000..39645c9
56637--- /dev/null
56638+++ b/grsecurity/gracl_res.c
56639@@ -0,0 +1,68 @@
56640+#include <linux/kernel.h>
56641+#include <linux/sched.h>
56642+#include <linux/gracl.h>
56643+#include <linux/grinternal.h>
56644+
56645+static const char *restab_log[] = {
56646+ [RLIMIT_CPU] = "RLIMIT_CPU",
56647+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
56648+ [RLIMIT_DATA] = "RLIMIT_DATA",
56649+ [RLIMIT_STACK] = "RLIMIT_STACK",
56650+ [RLIMIT_CORE] = "RLIMIT_CORE",
56651+ [RLIMIT_RSS] = "RLIMIT_RSS",
56652+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
56653+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
56654+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
56655+ [RLIMIT_AS] = "RLIMIT_AS",
56656+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
56657+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
56658+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
56659+ [RLIMIT_NICE] = "RLIMIT_NICE",
56660+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
56661+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
56662+ [GR_CRASH_RES] = "RLIMIT_CRASH"
56663+};
56664+
56665+void
56666+gr_log_resource(const struct task_struct *task,
56667+ const int res, const unsigned long wanted, const int gt)
56668+{
56669+ const struct cred *cred;
56670+ unsigned long rlim;
56671+
56672+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
56673+ return;
56674+
56675+ // not yet supported resource
56676+ if (unlikely(!restab_log[res]))
56677+ return;
56678+
56679+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
56680+ rlim = task_rlimit_max(task, res);
56681+ else
56682+ rlim = task_rlimit(task, res);
56683+
56684+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
56685+ return;
56686+
56687+ rcu_read_lock();
56688+ cred = __task_cred(task);
56689+
56690+ if (res == RLIMIT_NPROC &&
56691+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
56692+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
56693+ goto out_rcu_unlock;
56694+ else if (res == RLIMIT_MEMLOCK &&
56695+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
56696+ goto out_rcu_unlock;
56697+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
56698+ goto out_rcu_unlock;
56699+ rcu_read_unlock();
56700+
56701+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
56702+
56703+ return;
56704+out_rcu_unlock:
56705+ rcu_read_unlock();
56706+ return;
56707+}
56708diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
56709new file mode 100644
56710index 0000000..25197e9
56711--- /dev/null
56712+++ b/grsecurity/gracl_segv.c
56713@@ -0,0 +1,299 @@
56714+#include <linux/kernel.h>
56715+#include <linux/mm.h>
56716+#include <asm/uaccess.h>
56717+#include <asm/errno.h>
56718+#include <asm/mman.h>
56719+#include <net/sock.h>
56720+#include <linux/file.h>
56721+#include <linux/fs.h>
56722+#include <linux/net.h>
56723+#include <linux/in.h>
56724+#include <linux/slab.h>
56725+#include <linux/types.h>
56726+#include <linux/sched.h>
56727+#include <linux/timer.h>
56728+#include <linux/gracl.h>
56729+#include <linux/grsecurity.h>
56730+#include <linux/grinternal.h>
56731+
56732+static struct crash_uid *uid_set;
56733+static unsigned short uid_used;
56734+static DEFINE_SPINLOCK(gr_uid_lock);
56735+extern rwlock_t gr_inode_lock;
56736+extern struct acl_subject_label *
56737+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56738+ struct acl_role_label *role);
56739+
56740+#ifdef CONFIG_BTRFS_FS
56741+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56742+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56743+#endif
56744+
56745+static inline dev_t __get_dev(const struct dentry *dentry)
56746+{
56747+#ifdef CONFIG_BTRFS_FS
56748+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56749+ return get_btrfs_dev_from_inode(dentry->d_inode);
56750+ else
56751+#endif
56752+ return dentry->d_inode->i_sb->s_dev;
56753+}
56754+
56755+int
56756+gr_init_uidset(void)
56757+{
56758+ uid_set =
56759+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56760+ uid_used = 0;
56761+
56762+ return uid_set ? 1 : 0;
56763+}
56764+
56765+void
56766+gr_free_uidset(void)
56767+{
56768+ if (uid_set)
56769+ kfree(uid_set);
56770+
56771+ return;
56772+}
56773+
56774+int
56775+gr_find_uid(const uid_t uid)
56776+{
56777+ struct crash_uid *tmp = uid_set;
56778+ uid_t buid;
56779+ int low = 0, high = uid_used - 1, mid;
56780+
56781+ while (high >= low) {
56782+ mid = (low + high) >> 1;
56783+ buid = tmp[mid].uid;
56784+ if (buid == uid)
56785+ return mid;
56786+ if (buid > uid)
56787+ high = mid - 1;
56788+ if (buid < uid)
56789+ low = mid + 1;
56790+ }
56791+
56792+ return -1;
56793+}
56794+
56795+static __inline__ void
56796+gr_insertsort(void)
56797+{
56798+ unsigned short i, j;
56799+ struct crash_uid index;
56800+
56801+ for (i = 1; i < uid_used; i++) {
56802+ index = uid_set[i];
56803+ j = i;
56804+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
56805+ uid_set[j] = uid_set[j - 1];
56806+ j--;
56807+ }
56808+ uid_set[j] = index;
56809+ }
56810+
56811+ return;
56812+}
56813+
56814+static __inline__ void
56815+gr_insert_uid(const uid_t uid, const unsigned long expires)
56816+{
56817+ int loc;
56818+
56819+ if (uid_used == GR_UIDTABLE_MAX)
56820+ return;
56821+
56822+ loc = gr_find_uid(uid);
56823+
56824+ if (loc >= 0) {
56825+ uid_set[loc].expires = expires;
56826+ return;
56827+ }
56828+
56829+ uid_set[uid_used].uid = uid;
56830+ uid_set[uid_used].expires = expires;
56831+ uid_used++;
56832+
56833+ gr_insertsort();
56834+
56835+ return;
56836+}
56837+
56838+void
56839+gr_remove_uid(const unsigned short loc)
56840+{
56841+ unsigned short i;
56842+
56843+ for (i = loc + 1; i < uid_used; i++)
56844+ uid_set[i - 1] = uid_set[i];
56845+
56846+ uid_used--;
56847+
56848+ return;
56849+}
56850+
56851+int
56852+gr_check_crash_uid(const uid_t uid)
56853+{
56854+ int loc;
56855+ int ret = 0;
56856+
56857+ if (unlikely(!gr_acl_is_enabled()))
56858+ return 0;
56859+
56860+ spin_lock(&gr_uid_lock);
56861+ loc = gr_find_uid(uid);
56862+
56863+ if (loc < 0)
56864+ goto out_unlock;
56865+
56866+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
56867+ gr_remove_uid(loc);
56868+ else
56869+ ret = 1;
56870+
56871+out_unlock:
56872+ spin_unlock(&gr_uid_lock);
56873+ return ret;
56874+}
56875+
56876+static __inline__ int
56877+proc_is_setxid(const struct cred *cred)
56878+{
56879+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
56880+ !uid_eq(cred->uid, cred->fsuid))
56881+ return 1;
56882+ if (!uid_eq(cred->gid, cred->egid) || !uid_eq(cred->gid, cred->sgid) ||
56883+ !uid_eq(cred->gid, cred->fsgid))
56884+ return 1;
56885+
56886+ return 0;
56887+}
56888+
56889+extern int gr_fake_force_sig(int sig, struct task_struct *t);
56890+
56891+void
56892+gr_handle_crash(struct task_struct *task, const int sig)
56893+{
56894+ struct acl_subject_label *curr;
56895+ struct task_struct *tsk, *tsk2;
56896+ const struct cred *cred;
56897+ const struct cred *cred2;
56898+
56899+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
56900+ return;
56901+
56902+ if (unlikely(!gr_acl_is_enabled()))
56903+ return;
56904+
56905+ curr = task->acl;
56906+
56907+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
56908+ return;
56909+
56910+ if (time_before_eq(curr->expires, get_seconds())) {
56911+ curr->expires = 0;
56912+ curr->crashes = 0;
56913+ }
56914+
56915+ curr->crashes++;
56916+
56917+ if (!curr->expires)
56918+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
56919+
56920+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56921+ time_after(curr->expires, get_seconds())) {
56922+ rcu_read_lock();
56923+ cred = __task_cred(task);
56924+ if (!uid_eq(cred->uid, GLOBAL_ROOT_UID) && proc_is_setxid(cred)) {
56925+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56926+ spin_lock(&gr_uid_lock);
56927+ gr_insert_uid(cred->uid, curr->expires);
56928+ spin_unlock(&gr_uid_lock);
56929+ curr->expires = 0;
56930+ curr->crashes = 0;
56931+ read_lock(&tasklist_lock);
56932+ do_each_thread(tsk2, tsk) {
56933+ cred2 = __task_cred(tsk);
56934+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
56935+ gr_fake_force_sig(SIGKILL, tsk);
56936+ } while_each_thread(tsk2, tsk);
56937+ read_unlock(&tasklist_lock);
56938+ } else {
56939+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56940+ read_lock(&tasklist_lock);
56941+ read_lock(&grsec_exec_file_lock);
56942+ do_each_thread(tsk2, tsk) {
56943+ if (likely(tsk != task)) {
56944+ // if this thread has the same subject as the one that triggered
56945+ // RES_CRASH and it's the same binary, kill it
56946+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
56947+ gr_fake_force_sig(SIGKILL, tsk);
56948+ }
56949+ } while_each_thread(tsk2, tsk);
56950+ read_unlock(&grsec_exec_file_lock);
56951+ read_unlock(&tasklist_lock);
56952+ }
56953+ rcu_read_unlock();
56954+ }
56955+
56956+ return;
56957+}
56958+
56959+int
56960+gr_check_crash_exec(const struct file *filp)
56961+{
56962+ struct acl_subject_label *curr;
56963+
56964+ if (unlikely(!gr_acl_is_enabled()))
56965+ return 0;
56966+
56967+ read_lock(&gr_inode_lock);
56968+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
56969+ __get_dev(filp->f_path.dentry),
56970+ current->role);
56971+ read_unlock(&gr_inode_lock);
56972+
56973+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
56974+ (!curr->crashes && !curr->expires))
56975+ return 0;
56976+
56977+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56978+ time_after(curr->expires, get_seconds()))
56979+ return 1;
56980+ else if (time_before_eq(curr->expires, get_seconds())) {
56981+ curr->crashes = 0;
56982+ curr->expires = 0;
56983+ }
56984+
56985+ return 0;
56986+}
56987+
56988+void
56989+gr_handle_alertkill(struct task_struct *task)
56990+{
56991+ struct acl_subject_label *curracl;
56992+ __u32 curr_ip;
56993+ struct task_struct *p, *p2;
56994+
56995+ if (unlikely(!gr_acl_is_enabled()))
56996+ return;
56997+
56998+ curracl = task->acl;
56999+ curr_ip = task->signal->curr_ip;
57000+
57001+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
57002+ read_lock(&tasklist_lock);
57003+ do_each_thread(p2, p) {
57004+ if (p->signal->curr_ip == curr_ip)
57005+ gr_fake_force_sig(SIGKILL, p);
57006+ } while_each_thread(p2, p);
57007+ read_unlock(&tasklist_lock);
57008+ } else if (curracl->mode & GR_KILLPROC)
57009+ gr_fake_force_sig(SIGKILL, task);
57010+
57011+ return;
57012+}
57013diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
57014new file mode 100644
57015index 0000000..9d83a69
57016--- /dev/null
57017+++ b/grsecurity/gracl_shm.c
57018@@ -0,0 +1,40 @@
57019+#include <linux/kernel.h>
57020+#include <linux/mm.h>
57021+#include <linux/sched.h>
57022+#include <linux/file.h>
57023+#include <linux/ipc.h>
57024+#include <linux/gracl.h>
57025+#include <linux/grsecurity.h>
57026+#include <linux/grinternal.h>
57027+
57028+int
57029+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57030+ const time_t shm_createtime, const uid_t cuid, const int shmid)
57031+{
57032+ struct task_struct *task;
57033+
57034+ if (!gr_acl_is_enabled())
57035+ return 1;
57036+
57037+ rcu_read_lock();
57038+ read_lock(&tasklist_lock);
57039+
57040+ task = find_task_by_vpid(shm_cprid);
57041+
57042+ if (unlikely(!task))
57043+ task = find_task_by_vpid(shm_lapid);
57044+
57045+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
57046+ (task->pid == shm_lapid)) &&
57047+ (task->acl->mode & GR_PROTSHM) &&
57048+ (task->acl != current->acl))) {
57049+ read_unlock(&tasklist_lock);
57050+ rcu_read_unlock();
57051+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
57052+ return 0;
57053+ }
57054+ read_unlock(&tasklist_lock);
57055+ rcu_read_unlock();
57056+
57057+ return 1;
57058+}
57059diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
57060new file mode 100644
57061index 0000000..bc0be01
57062--- /dev/null
57063+++ b/grsecurity/grsec_chdir.c
57064@@ -0,0 +1,19 @@
57065+#include <linux/kernel.h>
57066+#include <linux/sched.h>
57067+#include <linux/fs.h>
57068+#include <linux/file.h>
57069+#include <linux/grsecurity.h>
57070+#include <linux/grinternal.h>
57071+
57072+void
57073+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
57074+{
57075+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57076+ if ((grsec_enable_chdir && grsec_enable_group &&
57077+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
57078+ !grsec_enable_group)) {
57079+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
57080+ }
57081+#endif
57082+ return;
57083+}
57084diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
57085new file mode 100644
57086index 0000000..9807ee2
57087--- /dev/null
57088+++ b/grsecurity/grsec_chroot.c
57089@@ -0,0 +1,368 @@
57090+#include <linux/kernel.h>
57091+#include <linux/module.h>
57092+#include <linux/sched.h>
57093+#include <linux/file.h>
57094+#include <linux/fs.h>
57095+#include <linux/mount.h>
57096+#include <linux/types.h>
57097+#include "../fs/mount.h"
57098+#include <linux/grsecurity.h>
57099+#include <linux/grinternal.h>
57100+
57101+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
57102+{
57103+#ifdef CONFIG_GRKERNSEC
57104+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
57105+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
57106+ task->gr_is_chrooted = 1;
57107+ else
57108+ task->gr_is_chrooted = 0;
57109+
57110+ task->gr_chroot_dentry = path->dentry;
57111+#endif
57112+ return;
57113+}
57114+
57115+void gr_clear_chroot_entries(struct task_struct *task)
57116+{
57117+#ifdef CONFIG_GRKERNSEC
57118+ task->gr_is_chrooted = 0;
57119+ task->gr_chroot_dentry = NULL;
57120+#endif
57121+ return;
57122+}
57123+
57124+int
57125+gr_handle_chroot_unix(const pid_t pid)
57126+{
57127+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57128+ struct task_struct *p;
57129+
57130+ if (unlikely(!grsec_enable_chroot_unix))
57131+ return 1;
57132+
57133+ if (likely(!proc_is_chrooted(current)))
57134+ return 1;
57135+
57136+ rcu_read_lock();
57137+ read_lock(&tasklist_lock);
57138+ p = find_task_by_vpid_unrestricted(pid);
57139+ if (unlikely(p && !have_same_root(current, p))) {
57140+ read_unlock(&tasklist_lock);
57141+ rcu_read_unlock();
57142+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
57143+ return 0;
57144+ }
57145+ read_unlock(&tasklist_lock);
57146+ rcu_read_unlock();
57147+#endif
57148+ return 1;
57149+}
57150+
57151+int
57152+gr_handle_chroot_nice(void)
57153+{
57154+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57155+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
57156+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
57157+ return -EPERM;
57158+ }
57159+#endif
57160+ return 0;
57161+}
57162+
57163+int
57164+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
57165+{
57166+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57167+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
57168+ && proc_is_chrooted(current)) {
57169+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
57170+ return -EACCES;
57171+ }
57172+#endif
57173+ return 0;
57174+}
57175+
57176+int
57177+gr_handle_chroot_rawio(const struct inode *inode)
57178+{
57179+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57180+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
57181+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
57182+ return 1;
57183+#endif
57184+ return 0;
57185+}
57186+
57187+int
57188+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
57189+{
57190+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57191+ struct task_struct *p;
57192+ int ret = 0;
57193+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
57194+ return ret;
57195+
57196+ read_lock(&tasklist_lock);
57197+ do_each_pid_task(pid, type, p) {
57198+ if (!have_same_root(current, p)) {
57199+ ret = 1;
57200+ goto out;
57201+ }
57202+ } while_each_pid_task(pid, type, p);
57203+out:
57204+ read_unlock(&tasklist_lock);
57205+ return ret;
57206+#endif
57207+ return 0;
57208+}
57209+
57210+int
57211+gr_pid_is_chrooted(struct task_struct *p)
57212+{
57213+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57214+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
57215+ return 0;
57216+
57217+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
57218+ !have_same_root(current, p)) {
57219+ return 1;
57220+ }
57221+#endif
57222+ return 0;
57223+}
57224+
57225+EXPORT_SYMBOL(gr_pid_is_chrooted);
57226+
57227+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
57228+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
57229+{
57230+ struct path path, currentroot;
57231+ int ret = 0;
57232+
57233+ path.dentry = (struct dentry *)u_dentry;
57234+ path.mnt = (struct vfsmount *)u_mnt;
57235+ get_fs_root(current->fs, &currentroot);
57236+ if (path_is_under(&path, &currentroot))
57237+ ret = 1;
57238+ path_put(&currentroot);
57239+
57240+ return ret;
57241+}
57242+#endif
57243+
57244+int
57245+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
57246+{
57247+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57248+ if (!grsec_enable_chroot_fchdir)
57249+ return 1;
57250+
57251+ if (!proc_is_chrooted(current))
57252+ return 1;
57253+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
57254+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
57255+ return 0;
57256+ }
57257+#endif
57258+ return 1;
57259+}
57260+
57261+int
57262+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57263+ const time_t shm_createtime)
57264+{
57265+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57266+ struct task_struct *p;
57267+ time_t starttime;
57268+
57269+ if (unlikely(!grsec_enable_chroot_shmat))
57270+ return 1;
57271+
57272+ if (likely(!proc_is_chrooted(current)))
57273+ return 1;
57274+
57275+ rcu_read_lock();
57276+ read_lock(&tasklist_lock);
57277+
57278+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
57279+ starttime = p->start_time.tv_sec;
57280+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
57281+ if (have_same_root(current, p)) {
57282+ goto allow;
57283+ } else {
57284+ read_unlock(&tasklist_lock);
57285+ rcu_read_unlock();
57286+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
57287+ return 0;
57288+ }
57289+ }
57290+ /* creator exited, pid reuse, fall through to next check */
57291+ }
57292+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
57293+ if (unlikely(!have_same_root(current, p))) {
57294+ read_unlock(&tasklist_lock);
57295+ rcu_read_unlock();
57296+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
57297+ return 0;
57298+ }
57299+ }
57300+
57301+allow:
57302+ read_unlock(&tasklist_lock);
57303+ rcu_read_unlock();
57304+#endif
57305+ return 1;
57306+}
57307+
57308+void
57309+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
57310+{
57311+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57312+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
57313+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
57314+#endif
57315+ return;
57316+}
57317+
57318+int
57319+gr_handle_chroot_mknod(const struct dentry *dentry,
57320+ const struct vfsmount *mnt, const int mode)
57321+{
57322+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57323+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
57324+ proc_is_chrooted(current)) {
57325+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
57326+ return -EPERM;
57327+ }
57328+#endif
57329+ return 0;
57330+}
57331+
57332+int
57333+gr_handle_chroot_mount(const struct dentry *dentry,
57334+ const struct vfsmount *mnt, const char *dev_name)
57335+{
57336+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57337+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
57338+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
57339+ return -EPERM;
57340+ }
57341+#endif
57342+ return 0;
57343+}
57344+
57345+int
57346+gr_handle_chroot_pivot(void)
57347+{
57348+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57349+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
57350+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
57351+ return -EPERM;
57352+ }
57353+#endif
57354+ return 0;
57355+}
57356+
57357+int
57358+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
57359+{
57360+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57361+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
57362+ !gr_is_outside_chroot(dentry, mnt)) {
57363+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
57364+ return -EPERM;
57365+ }
57366+#endif
57367+ return 0;
57368+}
57369+
57370+extern const char *captab_log[];
57371+extern int captab_log_entries;
57372+
57373+int
57374+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57375+{
57376+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57377+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
57378+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57379+ if (cap_raised(chroot_caps, cap)) {
57380+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
57381+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
57382+ }
57383+ return 0;
57384+ }
57385+ }
57386+#endif
57387+ return 1;
57388+}
57389+
57390+int
57391+gr_chroot_is_capable(const int cap)
57392+{
57393+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57394+ return gr_task_chroot_is_capable(current, current_cred(), cap);
57395+#endif
57396+ return 1;
57397+}
57398+
57399+int
57400+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
57401+{
57402+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57403+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
57404+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57405+ if (cap_raised(chroot_caps, cap)) {
57406+ return 0;
57407+ }
57408+ }
57409+#endif
57410+ return 1;
57411+}
57412+
57413+int
57414+gr_chroot_is_capable_nolog(const int cap)
57415+{
57416+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57417+ return gr_task_chroot_is_capable_nolog(current, cap);
57418+#endif
57419+ return 1;
57420+}
57421+
57422+int
57423+gr_handle_chroot_sysctl(const int op)
57424+{
57425+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57426+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
57427+ proc_is_chrooted(current))
57428+ return -EACCES;
57429+#endif
57430+ return 0;
57431+}
57432+
57433+void
57434+gr_handle_chroot_chdir(struct path *path)
57435+{
57436+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57437+ if (grsec_enable_chroot_chdir)
57438+ set_fs_pwd(current->fs, path);
57439+#endif
57440+ return;
57441+}
57442+
57443+int
57444+gr_handle_chroot_chmod(const struct dentry *dentry,
57445+ const struct vfsmount *mnt, const int mode)
57446+{
57447+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57448+ /* allow chmod +s on directories, but not files */
57449+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
57450+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
57451+ proc_is_chrooted(current)) {
57452+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
57453+ return -EPERM;
57454+ }
57455+#endif
57456+ return 0;
57457+}
57458diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
57459new file mode 100644
57460index 0000000..213ad8b
57461--- /dev/null
57462+++ b/grsecurity/grsec_disabled.c
57463@@ -0,0 +1,437 @@
57464+#include <linux/kernel.h>
57465+#include <linux/module.h>
57466+#include <linux/sched.h>
57467+#include <linux/file.h>
57468+#include <linux/fs.h>
57469+#include <linux/kdev_t.h>
57470+#include <linux/net.h>
57471+#include <linux/in.h>
57472+#include <linux/ip.h>
57473+#include <linux/skbuff.h>
57474+#include <linux/sysctl.h>
57475+
57476+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57477+void
57478+pax_set_initial_flags(struct linux_binprm *bprm)
57479+{
57480+ return;
57481+}
57482+#endif
57483+
57484+#ifdef CONFIG_SYSCTL
57485+__u32
57486+gr_handle_sysctl(const struct ctl_table * table, const int op)
57487+{
57488+ return 0;
57489+}
57490+#endif
57491+
57492+#ifdef CONFIG_TASKSTATS
57493+int gr_is_taskstats_denied(int pid)
57494+{
57495+ return 0;
57496+}
57497+#endif
57498+
57499+int
57500+gr_acl_is_enabled(void)
57501+{
57502+ return 0;
57503+}
57504+
57505+void
57506+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
57507+{
57508+ return;
57509+}
57510+
57511+int
57512+gr_handle_rawio(const struct inode *inode)
57513+{
57514+ return 0;
57515+}
57516+
57517+void
57518+gr_acl_handle_psacct(struct task_struct *task, const long code)
57519+{
57520+ return;
57521+}
57522+
57523+int
57524+gr_handle_ptrace(struct task_struct *task, const long request)
57525+{
57526+ return 0;
57527+}
57528+
57529+int
57530+gr_handle_proc_ptrace(struct task_struct *task)
57531+{
57532+ return 0;
57533+}
57534+
57535+void
57536+gr_learn_resource(const struct task_struct *task,
57537+ const int res, const unsigned long wanted, const int gt)
57538+{
57539+ return;
57540+}
57541+
57542+int
57543+gr_set_acls(const int type)
57544+{
57545+ return 0;
57546+}
57547+
57548+int
57549+gr_check_hidden_task(const struct task_struct *tsk)
57550+{
57551+ return 0;
57552+}
57553+
57554+int
57555+gr_check_protected_task(const struct task_struct *task)
57556+{
57557+ return 0;
57558+}
57559+
57560+int
57561+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57562+{
57563+ return 0;
57564+}
57565+
57566+void
57567+gr_copy_label(struct task_struct *tsk)
57568+{
57569+ return;
57570+}
57571+
57572+void
57573+gr_set_pax_flags(struct task_struct *task)
57574+{
57575+ return;
57576+}
57577+
57578+int
57579+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57580+ const int unsafe_share)
57581+{
57582+ return 0;
57583+}
57584+
57585+void
57586+gr_handle_delete(const ino_t ino, const dev_t dev)
57587+{
57588+ return;
57589+}
57590+
57591+void
57592+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
57593+{
57594+ return;
57595+}
57596+
57597+void
57598+gr_handle_crash(struct task_struct *task, const int sig)
57599+{
57600+ return;
57601+}
57602+
57603+int
57604+gr_check_crash_exec(const struct file *filp)
57605+{
57606+ return 0;
57607+}
57608+
57609+int
57610+gr_check_crash_uid(const uid_t uid)
57611+{
57612+ return 0;
57613+}
57614+
57615+void
57616+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57617+ struct dentry *old_dentry,
57618+ struct dentry *new_dentry,
57619+ struct vfsmount *mnt, const __u8 replace)
57620+{
57621+ return;
57622+}
57623+
57624+int
57625+gr_search_socket(const int family, const int type, const int protocol)
57626+{
57627+ return 1;
57628+}
57629+
57630+int
57631+gr_search_connectbind(const int mode, const struct socket *sock,
57632+ const struct sockaddr_in *addr)
57633+{
57634+ return 0;
57635+}
57636+
57637+void
57638+gr_handle_alertkill(struct task_struct *task)
57639+{
57640+ return;
57641+}
57642+
57643+__u32
57644+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
57645+{
57646+ return 1;
57647+}
57648+
57649+__u32
57650+gr_acl_handle_hidden_file(const struct dentry * dentry,
57651+ const struct vfsmount * mnt)
57652+{
57653+ return 1;
57654+}
57655+
57656+__u32
57657+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
57658+ int acc_mode)
57659+{
57660+ return 1;
57661+}
57662+
57663+__u32
57664+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57665+{
57666+ return 1;
57667+}
57668+
57669+__u32
57670+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
57671+{
57672+ return 1;
57673+}
57674+
57675+int
57676+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
57677+ unsigned int *vm_flags)
57678+{
57679+ return 1;
57680+}
57681+
57682+__u32
57683+gr_acl_handle_truncate(const struct dentry * dentry,
57684+ const struct vfsmount * mnt)
57685+{
57686+ return 1;
57687+}
57688+
57689+__u32
57690+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
57691+{
57692+ return 1;
57693+}
57694+
57695+__u32
57696+gr_acl_handle_access(const struct dentry * dentry,
57697+ const struct vfsmount * mnt, const int fmode)
57698+{
57699+ return 1;
57700+}
57701+
57702+__u32
57703+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
57704+ umode_t *mode)
57705+{
57706+ return 1;
57707+}
57708+
57709+__u32
57710+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
57711+{
57712+ return 1;
57713+}
57714+
57715+__u32
57716+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57717+{
57718+ return 1;
57719+}
57720+
57721+void
57722+grsecurity_init(void)
57723+{
57724+ return;
57725+}
57726+
57727+umode_t gr_acl_umask(void)
57728+{
57729+ return 0;
57730+}
57731+
57732+__u32
57733+gr_acl_handle_mknod(const struct dentry * new_dentry,
57734+ const struct dentry * parent_dentry,
57735+ const struct vfsmount * parent_mnt,
57736+ const int mode)
57737+{
57738+ return 1;
57739+}
57740+
57741+__u32
57742+gr_acl_handle_mkdir(const struct dentry * new_dentry,
57743+ const struct dentry * parent_dentry,
57744+ const struct vfsmount * parent_mnt)
57745+{
57746+ return 1;
57747+}
57748+
57749+__u32
57750+gr_acl_handle_symlink(const struct dentry * new_dentry,
57751+ const struct dentry * parent_dentry,
57752+ const struct vfsmount * parent_mnt, const char *from)
57753+{
57754+ return 1;
57755+}
57756+
57757+__u32
57758+gr_acl_handle_link(const struct dentry * new_dentry,
57759+ const struct dentry * parent_dentry,
57760+ const struct vfsmount * parent_mnt,
57761+ const struct dentry * old_dentry,
57762+ const struct vfsmount * old_mnt, const char *to)
57763+{
57764+ return 1;
57765+}
57766+
57767+int
57768+gr_acl_handle_rename(const struct dentry *new_dentry,
57769+ const struct dentry *parent_dentry,
57770+ const struct vfsmount *parent_mnt,
57771+ const struct dentry *old_dentry,
57772+ const struct inode *old_parent_inode,
57773+ const struct vfsmount *old_mnt, const char *newname)
57774+{
57775+ return 0;
57776+}
57777+
57778+int
57779+gr_acl_handle_filldir(const struct file *file, const char *name,
57780+ const int namelen, const ino_t ino)
57781+{
57782+ return 1;
57783+}
57784+
57785+int
57786+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57787+ const time_t shm_createtime, const uid_t cuid, const int shmid)
57788+{
57789+ return 1;
57790+}
57791+
57792+int
57793+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
57794+{
57795+ return 0;
57796+}
57797+
57798+int
57799+gr_search_accept(const struct socket *sock)
57800+{
57801+ return 0;
57802+}
57803+
57804+int
57805+gr_search_listen(const struct socket *sock)
57806+{
57807+ return 0;
57808+}
57809+
57810+int
57811+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
57812+{
57813+ return 0;
57814+}
57815+
57816+__u32
57817+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
57818+{
57819+ return 1;
57820+}
57821+
57822+__u32
57823+gr_acl_handle_creat(const struct dentry * dentry,
57824+ const struct dentry * p_dentry,
57825+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57826+ const int imode)
57827+{
57828+ return 1;
57829+}
57830+
57831+void
57832+gr_acl_handle_exit(void)
57833+{
57834+ return;
57835+}
57836+
57837+int
57838+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57839+{
57840+ return 1;
57841+}
57842+
57843+void
57844+gr_set_role_label(const uid_t uid, const gid_t gid)
57845+{
57846+ return;
57847+}
57848+
57849+int
57850+gr_acl_handle_procpidmem(const struct task_struct *task)
57851+{
57852+ return 0;
57853+}
57854+
57855+int
57856+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
57857+{
57858+ return 0;
57859+}
57860+
57861+int
57862+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
57863+{
57864+ return 0;
57865+}
57866+
57867+void
57868+gr_set_kernel_label(struct task_struct *task)
57869+{
57870+ return;
57871+}
57872+
57873+int
57874+gr_check_user_change(int real, int effective, int fs)
57875+{
57876+ return 0;
57877+}
57878+
57879+int
57880+gr_check_group_change(int real, int effective, int fs)
57881+{
57882+ return 0;
57883+}
57884+
57885+int gr_acl_enable_at_secure(void)
57886+{
57887+ return 0;
57888+}
57889+
57890+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57891+{
57892+ return dentry->d_inode->i_sb->s_dev;
57893+}
57894+
57895+EXPORT_SYMBOL(gr_learn_resource);
57896+EXPORT_SYMBOL(gr_set_kernel_label);
57897+#ifdef CONFIG_SECURITY
57898+EXPORT_SYMBOL(gr_check_user_change);
57899+EXPORT_SYMBOL(gr_check_group_change);
57900+#endif
57901diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
57902new file mode 100644
57903index 0000000..abfa971
57904--- /dev/null
57905+++ b/grsecurity/grsec_exec.c
57906@@ -0,0 +1,174 @@
57907+#include <linux/kernel.h>
57908+#include <linux/sched.h>
57909+#include <linux/file.h>
57910+#include <linux/binfmts.h>
57911+#include <linux/fs.h>
57912+#include <linux/types.h>
57913+#include <linux/grdefs.h>
57914+#include <linux/grsecurity.h>
57915+#include <linux/grinternal.h>
57916+#include <linux/capability.h>
57917+#include <linux/module.h>
57918+
57919+#include <asm/uaccess.h>
57920+
57921+#ifdef CONFIG_GRKERNSEC_EXECLOG
57922+static char gr_exec_arg_buf[132];
57923+static DEFINE_MUTEX(gr_exec_arg_mutex);
57924+#endif
57925+
57926+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
57927+
57928+void
57929+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
57930+{
57931+#ifdef CONFIG_GRKERNSEC_EXECLOG
57932+ char *grarg = gr_exec_arg_buf;
57933+ unsigned int i, x, execlen = 0;
57934+ char c;
57935+
57936+ if (!((grsec_enable_execlog && grsec_enable_group &&
57937+ in_group_p(grsec_audit_gid))
57938+ || (grsec_enable_execlog && !grsec_enable_group)))
57939+ return;
57940+
57941+ mutex_lock(&gr_exec_arg_mutex);
57942+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
57943+
57944+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
57945+ const char __user *p;
57946+ unsigned int len;
57947+
57948+ p = get_user_arg_ptr(argv, i);
57949+ if (IS_ERR(p))
57950+ goto log;
57951+
57952+ len = strnlen_user(p, 128 - execlen);
57953+ if (len > 128 - execlen)
57954+ len = 128 - execlen;
57955+ else if (len > 0)
57956+ len--;
57957+ if (copy_from_user(grarg + execlen, p, len))
57958+ goto log;
57959+
57960+ /* rewrite unprintable characters */
57961+ for (x = 0; x < len; x++) {
57962+ c = *(grarg + execlen + x);
57963+ if (c < 32 || c > 126)
57964+ *(grarg + execlen + x) = ' ';
57965+ }
57966+
57967+ execlen += len;
57968+ *(grarg + execlen) = ' ';
57969+ *(grarg + execlen + 1) = '\0';
57970+ execlen++;
57971+ }
57972+
57973+ log:
57974+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57975+ bprm->file->f_path.mnt, grarg);
57976+ mutex_unlock(&gr_exec_arg_mutex);
57977+#endif
57978+ return;
57979+}
57980+
57981+#ifdef CONFIG_GRKERNSEC
57982+extern int gr_acl_is_capable(const int cap);
57983+extern int gr_acl_is_capable_nolog(const int cap);
57984+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57985+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
57986+extern int gr_chroot_is_capable(const int cap);
57987+extern int gr_chroot_is_capable_nolog(const int cap);
57988+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57989+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
57990+#endif
57991+
57992+const char *captab_log[] = {
57993+ "CAP_CHOWN",
57994+ "CAP_DAC_OVERRIDE",
57995+ "CAP_DAC_READ_SEARCH",
57996+ "CAP_FOWNER",
57997+ "CAP_FSETID",
57998+ "CAP_KILL",
57999+ "CAP_SETGID",
58000+ "CAP_SETUID",
58001+ "CAP_SETPCAP",
58002+ "CAP_LINUX_IMMUTABLE",
58003+ "CAP_NET_BIND_SERVICE",
58004+ "CAP_NET_BROADCAST",
58005+ "CAP_NET_ADMIN",
58006+ "CAP_NET_RAW",
58007+ "CAP_IPC_LOCK",
58008+ "CAP_IPC_OWNER",
58009+ "CAP_SYS_MODULE",
58010+ "CAP_SYS_RAWIO",
58011+ "CAP_SYS_CHROOT",
58012+ "CAP_SYS_PTRACE",
58013+ "CAP_SYS_PACCT",
58014+ "CAP_SYS_ADMIN",
58015+ "CAP_SYS_BOOT",
58016+ "CAP_SYS_NICE",
58017+ "CAP_SYS_RESOURCE",
58018+ "CAP_SYS_TIME",
58019+ "CAP_SYS_TTY_CONFIG",
58020+ "CAP_MKNOD",
58021+ "CAP_LEASE",
58022+ "CAP_AUDIT_WRITE",
58023+ "CAP_AUDIT_CONTROL",
58024+ "CAP_SETFCAP",
58025+ "CAP_MAC_OVERRIDE",
58026+ "CAP_MAC_ADMIN",
58027+ "CAP_SYSLOG",
58028+ "CAP_WAKE_ALARM"
58029+};
58030+
58031+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
58032+
58033+int gr_is_capable(const int cap)
58034+{
58035+#ifdef CONFIG_GRKERNSEC
58036+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
58037+ return 1;
58038+ return 0;
58039+#else
58040+ return 1;
58041+#endif
58042+}
58043+
58044+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
58045+{
58046+#ifdef CONFIG_GRKERNSEC
58047+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
58048+ return 1;
58049+ return 0;
58050+#else
58051+ return 1;
58052+#endif
58053+}
58054+
58055+int gr_is_capable_nolog(const int cap)
58056+{
58057+#ifdef CONFIG_GRKERNSEC
58058+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
58059+ return 1;
58060+ return 0;
58061+#else
58062+ return 1;
58063+#endif
58064+}
58065+
58066+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
58067+{
58068+#ifdef CONFIG_GRKERNSEC
58069+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
58070+ return 1;
58071+ return 0;
58072+#else
58073+ return 1;
58074+#endif
58075+}
58076+
58077+EXPORT_SYMBOL(gr_is_capable);
58078+EXPORT_SYMBOL(gr_is_capable_nolog);
58079+EXPORT_SYMBOL(gr_task_is_capable);
58080+EXPORT_SYMBOL(gr_task_is_capable_nolog);
58081diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
58082new file mode 100644
58083index 0000000..d3ee748
58084--- /dev/null
58085+++ b/grsecurity/grsec_fifo.c
58086@@ -0,0 +1,24 @@
58087+#include <linux/kernel.h>
58088+#include <linux/sched.h>
58089+#include <linux/fs.h>
58090+#include <linux/file.h>
58091+#include <linux/grinternal.h>
58092+
58093+int
58094+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
58095+ const struct dentry *dir, const int flag, const int acc_mode)
58096+{
58097+#ifdef CONFIG_GRKERNSEC_FIFO
58098+ const struct cred *cred = current_cred();
58099+
58100+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
58101+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
58102+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
58103+ (cred->fsuid != dentry->d_inode->i_uid)) {
58104+ if (!inode_permission(dentry->d_inode, acc_mode))
58105+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
58106+ return -EACCES;
58107+ }
58108+#endif
58109+ return 0;
58110+}
58111diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
58112new file mode 100644
58113index 0000000..8ca18bf
58114--- /dev/null
58115+++ b/grsecurity/grsec_fork.c
58116@@ -0,0 +1,23 @@
58117+#include <linux/kernel.h>
58118+#include <linux/sched.h>
58119+#include <linux/grsecurity.h>
58120+#include <linux/grinternal.h>
58121+#include <linux/errno.h>
58122+
58123+void
58124+gr_log_forkfail(const int retval)
58125+{
58126+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58127+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
58128+ switch (retval) {
58129+ case -EAGAIN:
58130+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
58131+ break;
58132+ case -ENOMEM:
58133+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
58134+ break;
58135+ }
58136+ }
58137+#endif
58138+ return;
58139+}
58140diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
58141new file mode 100644
58142index 0000000..05a6015
58143--- /dev/null
58144+++ b/grsecurity/grsec_init.c
58145@@ -0,0 +1,283 @@
58146+#include <linux/kernel.h>
58147+#include <linux/sched.h>
58148+#include <linux/mm.h>
58149+#include <linux/gracl.h>
58150+#include <linux/slab.h>
58151+#include <linux/vmalloc.h>
58152+#include <linux/percpu.h>
58153+#include <linux/module.h>
58154+
58155+int grsec_enable_ptrace_readexec;
58156+int grsec_enable_setxid;
58157+int grsec_enable_symlinkown;
58158+int grsec_symlinkown_gid;
58159+int grsec_enable_brute;
58160+int grsec_enable_link;
58161+int grsec_enable_dmesg;
58162+int grsec_enable_harden_ptrace;
58163+int grsec_enable_fifo;
58164+int grsec_enable_execlog;
58165+int grsec_enable_signal;
58166+int grsec_enable_forkfail;
58167+int grsec_enable_audit_ptrace;
58168+int grsec_enable_time;
58169+int grsec_enable_audit_textrel;
58170+int grsec_enable_group;
58171+int grsec_audit_gid;
58172+int grsec_enable_chdir;
58173+int grsec_enable_mount;
58174+int grsec_enable_rofs;
58175+int grsec_enable_chroot_findtask;
58176+int grsec_enable_chroot_mount;
58177+int grsec_enable_chroot_shmat;
58178+int grsec_enable_chroot_fchdir;
58179+int grsec_enable_chroot_double;
58180+int grsec_enable_chroot_pivot;
58181+int grsec_enable_chroot_chdir;
58182+int grsec_enable_chroot_chmod;
58183+int grsec_enable_chroot_mknod;
58184+int grsec_enable_chroot_nice;
58185+int grsec_enable_chroot_execlog;
58186+int grsec_enable_chroot_caps;
58187+int grsec_enable_chroot_sysctl;
58188+int grsec_enable_chroot_unix;
58189+int grsec_enable_tpe;
58190+int grsec_tpe_gid;
58191+int grsec_enable_blackhole;
58192+#ifdef CONFIG_IPV6_MODULE
58193+EXPORT_SYMBOL(grsec_enable_blackhole);
58194+#endif
58195+int grsec_lastack_retries;
58196+int grsec_enable_tpe_all;
58197+int grsec_enable_tpe_invert;
58198+int grsec_enable_socket_all;
58199+int grsec_socket_all_gid;
58200+int grsec_enable_socket_client;
58201+int grsec_socket_client_gid;
58202+int grsec_enable_socket_server;
58203+int grsec_socket_server_gid;
58204+int grsec_resource_logging;
58205+int grsec_disable_privio;
58206+int grsec_enable_log_rwxmaps;
58207+int grsec_lock;
58208+
58209+DEFINE_SPINLOCK(grsec_alert_lock);
58210+unsigned long grsec_alert_wtime = 0;
58211+unsigned long grsec_alert_fyet = 0;
58212+
58213+DEFINE_SPINLOCK(grsec_audit_lock);
58214+
58215+DEFINE_RWLOCK(grsec_exec_file_lock);
58216+
58217+char *gr_shared_page[4];
58218+
58219+char *gr_alert_log_fmt;
58220+char *gr_audit_log_fmt;
58221+char *gr_alert_log_buf;
58222+char *gr_audit_log_buf;
58223+
58224+extern struct gr_arg *gr_usermode;
58225+extern unsigned char *gr_system_salt;
58226+extern unsigned char *gr_system_sum;
58227+
58228+void __init
58229+grsecurity_init(void)
58230+{
58231+ int j;
58232+ /* create the per-cpu shared pages */
58233+
58234+#ifdef CONFIG_X86
58235+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
58236+#endif
58237+
58238+ for (j = 0; j < 4; j++) {
58239+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
58240+ if (gr_shared_page[j] == NULL) {
58241+ panic("Unable to allocate grsecurity shared page");
58242+ return;
58243+ }
58244+ }
58245+
58246+ /* allocate log buffers */
58247+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
58248+ if (!gr_alert_log_fmt) {
58249+ panic("Unable to allocate grsecurity alert log format buffer");
58250+ return;
58251+ }
58252+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
58253+ if (!gr_audit_log_fmt) {
58254+ panic("Unable to allocate grsecurity audit log format buffer");
58255+ return;
58256+ }
58257+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
58258+ if (!gr_alert_log_buf) {
58259+ panic("Unable to allocate grsecurity alert log buffer");
58260+ return;
58261+ }
58262+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
58263+ if (!gr_audit_log_buf) {
58264+ panic("Unable to allocate grsecurity audit log buffer");
58265+ return;
58266+ }
58267+
58268+ /* allocate memory for authentication structure */
58269+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
58270+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
58271+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
58272+
58273+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
58274+ panic("Unable to allocate grsecurity authentication structure");
58275+ return;
58276+ }
58277+
58278+
58279+#ifdef CONFIG_GRKERNSEC_IO
58280+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
58281+ grsec_disable_privio = 1;
58282+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
58283+ grsec_disable_privio = 1;
58284+#else
58285+ grsec_disable_privio = 0;
58286+#endif
58287+#endif
58288+
58289+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58290+ /* for backward compatibility, tpe_invert always defaults to on if
58291+ enabled in the kernel
58292+ */
58293+ grsec_enable_tpe_invert = 1;
58294+#endif
58295+
58296+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
58297+#ifndef CONFIG_GRKERNSEC_SYSCTL
58298+ grsec_lock = 1;
58299+#endif
58300+
58301+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58302+ grsec_enable_audit_textrel = 1;
58303+#endif
58304+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58305+ grsec_enable_log_rwxmaps = 1;
58306+#endif
58307+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58308+ grsec_enable_group = 1;
58309+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
58310+#endif
58311+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58312+ grsec_enable_ptrace_readexec = 1;
58313+#endif
58314+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58315+ grsec_enable_chdir = 1;
58316+#endif
58317+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58318+ grsec_enable_harden_ptrace = 1;
58319+#endif
58320+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58321+ grsec_enable_mount = 1;
58322+#endif
58323+#ifdef CONFIG_GRKERNSEC_LINK
58324+ grsec_enable_link = 1;
58325+#endif
58326+#ifdef CONFIG_GRKERNSEC_BRUTE
58327+ grsec_enable_brute = 1;
58328+#endif
58329+#ifdef CONFIG_GRKERNSEC_DMESG
58330+ grsec_enable_dmesg = 1;
58331+#endif
58332+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58333+ grsec_enable_blackhole = 1;
58334+ grsec_lastack_retries = 4;
58335+#endif
58336+#ifdef CONFIG_GRKERNSEC_FIFO
58337+ grsec_enable_fifo = 1;
58338+#endif
58339+#ifdef CONFIG_GRKERNSEC_EXECLOG
58340+ grsec_enable_execlog = 1;
58341+#endif
58342+#ifdef CONFIG_GRKERNSEC_SETXID
58343+ grsec_enable_setxid = 1;
58344+#endif
58345+#ifdef CONFIG_GRKERNSEC_SIGNAL
58346+ grsec_enable_signal = 1;
58347+#endif
58348+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58349+ grsec_enable_forkfail = 1;
58350+#endif
58351+#ifdef CONFIG_GRKERNSEC_TIME
58352+ grsec_enable_time = 1;
58353+#endif
58354+#ifdef CONFIG_GRKERNSEC_RESLOG
58355+ grsec_resource_logging = 1;
58356+#endif
58357+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58358+ grsec_enable_chroot_findtask = 1;
58359+#endif
58360+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58361+ grsec_enable_chroot_unix = 1;
58362+#endif
58363+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58364+ grsec_enable_chroot_mount = 1;
58365+#endif
58366+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58367+ grsec_enable_chroot_fchdir = 1;
58368+#endif
58369+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58370+ grsec_enable_chroot_shmat = 1;
58371+#endif
58372+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58373+ grsec_enable_audit_ptrace = 1;
58374+#endif
58375+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58376+ grsec_enable_chroot_double = 1;
58377+#endif
58378+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58379+ grsec_enable_chroot_pivot = 1;
58380+#endif
58381+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58382+ grsec_enable_chroot_chdir = 1;
58383+#endif
58384+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58385+ grsec_enable_chroot_chmod = 1;
58386+#endif
58387+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58388+ grsec_enable_chroot_mknod = 1;
58389+#endif
58390+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58391+ grsec_enable_chroot_nice = 1;
58392+#endif
58393+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58394+ grsec_enable_chroot_execlog = 1;
58395+#endif
58396+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58397+ grsec_enable_chroot_caps = 1;
58398+#endif
58399+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58400+ grsec_enable_chroot_sysctl = 1;
58401+#endif
58402+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
58403+ grsec_enable_symlinkown = 1;
58404+ grsec_symlinkown_gid = CONFIG_GRKERNSEC_SYMLINKOWN_GID;
58405+#endif
58406+#ifdef CONFIG_GRKERNSEC_TPE
58407+ grsec_enable_tpe = 1;
58408+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
58409+#ifdef CONFIG_GRKERNSEC_TPE_ALL
58410+ grsec_enable_tpe_all = 1;
58411+#endif
58412+#endif
58413+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58414+ grsec_enable_socket_all = 1;
58415+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
58416+#endif
58417+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58418+ grsec_enable_socket_client = 1;
58419+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
58420+#endif
58421+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58422+ grsec_enable_socket_server = 1;
58423+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
58424+#endif
58425+#endif
58426+
58427+ return;
58428+}
58429diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
58430new file mode 100644
58431index 0000000..589481f
58432--- /dev/null
58433+++ b/grsecurity/grsec_link.c
58434@@ -0,0 +1,58 @@
58435+#include <linux/kernel.h>
58436+#include <linux/sched.h>
58437+#include <linux/fs.h>
58438+#include <linux/file.h>
58439+#include <linux/grinternal.h>
58440+
58441+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
58442+{
58443+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
58444+ const struct inode *link_inode = link->dentry->d_inode;
58445+
58446+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
58447+ /* ignore root-owned links, e.g. /proc/self */
58448+ !uid_eq(link_inode->i_uid, GLOBAL_ROOT_UID) && target &&
58449+ !uid_eq(link_inode->i_uid, target->i_uid)) {
58450+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
58451+ return 1;
58452+ }
58453+#endif
58454+ return 0;
58455+}
58456+
58457+int
58458+gr_handle_follow_link(const struct inode *parent,
58459+ const struct inode *inode,
58460+ const struct dentry *dentry, const struct vfsmount *mnt)
58461+{
58462+#ifdef CONFIG_GRKERNSEC_LINK
58463+ const struct cred *cred = current_cred();
58464+
58465+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
58466+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
58467+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
58468+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
58469+ return -EACCES;
58470+ }
58471+#endif
58472+ return 0;
58473+}
58474+
58475+int
58476+gr_handle_hardlink(const struct dentry *dentry,
58477+ const struct vfsmount *mnt,
58478+ struct inode *inode, const int mode, const char *to)
58479+{
58480+#ifdef CONFIG_GRKERNSEC_LINK
58481+ const struct cred *cred = current_cred();
58482+
58483+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
58484+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
58485+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
58486+ !capable(CAP_FOWNER) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
58487+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
58488+ return -EPERM;
58489+ }
58490+#endif
58491+ return 0;
58492+}
58493diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
58494new file mode 100644
58495index 0000000..a45d2e9
58496--- /dev/null
58497+++ b/grsecurity/grsec_log.c
58498@@ -0,0 +1,322 @@
58499+#include <linux/kernel.h>
58500+#include <linux/sched.h>
58501+#include <linux/file.h>
58502+#include <linux/tty.h>
58503+#include <linux/fs.h>
58504+#include <linux/grinternal.h>
58505+
58506+#ifdef CONFIG_TREE_PREEMPT_RCU
58507+#define DISABLE_PREEMPT() preempt_disable()
58508+#define ENABLE_PREEMPT() preempt_enable()
58509+#else
58510+#define DISABLE_PREEMPT()
58511+#define ENABLE_PREEMPT()
58512+#endif
58513+
58514+#define BEGIN_LOCKS(x) \
58515+ DISABLE_PREEMPT(); \
58516+ rcu_read_lock(); \
58517+ read_lock(&tasklist_lock); \
58518+ read_lock(&grsec_exec_file_lock); \
58519+ if (x != GR_DO_AUDIT) \
58520+ spin_lock(&grsec_alert_lock); \
58521+ else \
58522+ spin_lock(&grsec_audit_lock)
58523+
58524+#define END_LOCKS(x) \
58525+ if (x != GR_DO_AUDIT) \
58526+ spin_unlock(&grsec_alert_lock); \
58527+ else \
58528+ spin_unlock(&grsec_audit_lock); \
58529+ read_unlock(&grsec_exec_file_lock); \
58530+ read_unlock(&tasklist_lock); \
58531+ rcu_read_unlock(); \
58532+ ENABLE_PREEMPT(); \
58533+ if (x == GR_DONT_AUDIT) \
58534+ gr_handle_alertkill(current)
58535+
58536+enum {
58537+ FLOODING,
58538+ NO_FLOODING
58539+};
58540+
58541+extern char *gr_alert_log_fmt;
58542+extern char *gr_audit_log_fmt;
58543+extern char *gr_alert_log_buf;
58544+extern char *gr_audit_log_buf;
58545+
58546+static int gr_log_start(int audit)
58547+{
58548+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
58549+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
58550+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58551+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
58552+ unsigned long curr_secs = get_seconds();
58553+
58554+ if (audit == GR_DO_AUDIT)
58555+ goto set_fmt;
58556+
58557+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
58558+ grsec_alert_wtime = curr_secs;
58559+ grsec_alert_fyet = 0;
58560+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
58561+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
58562+ grsec_alert_fyet++;
58563+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
58564+ grsec_alert_wtime = curr_secs;
58565+ grsec_alert_fyet++;
58566+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
58567+ return FLOODING;
58568+ }
58569+ else return FLOODING;
58570+
58571+set_fmt:
58572+#endif
58573+ memset(buf, 0, PAGE_SIZE);
58574+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
58575+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
58576+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58577+ } else if (current->signal->curr_ip) {
58578+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
58579+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
58580+ } else if (gr_acl_is_enabled()) {
58581+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
58582+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58583+ } else {
58584+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
58585+ strcpy(buf, fmt);
58586+ }
58587+
58588+ return NO_FLOODING;
58589+}
58590+
58591+static void gr_log_middle(int audit, const char *msg, va_list ap)
58592+ __attribute__ ((format (printf, 2, 0)));
58593+
58594+static void gr_log_middle(int audit, const char *msg, va_list ap)
58595+{
58596+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58597+ unsigned int len = strlen(buf);
58598+
58599+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58600+
58601+ return;
58602+}
58603+
58604+static void gr_log_middle_varargs(int audit, const char *msg, ...)
58605+ __attribute__ ((format (printf, 2, 3)));
58606+
58607+static void gr_log_middle_varargs(int audit, const char *msg, ...)
58608+{
58609+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58610+ unsigned int len = strlen(buf);
58611+ va_list ap;
58612+
58613+ va_start(ap, msg);
58614+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58615+ va_end(ap);
58616+
58617+ return;
58618+}
58619+
58620+static void gr_log_end(int audit, int append_default)
58621+{
58622+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58623+
58624+ if (append_default) {
58625+ unsigned int len = strlen(buf);
58626+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
58627+ }
58628+
58629+ printk("%s\n", buf);
58630+
58631+ return;
58632+}
58633+
58634+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
58635+{
58636+ int logtype;
58637+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
58638+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
58639+ void *voidptr = NULL;
58640+ int num1 = 0, num2 = 0;
58641+ unsigned long ulong1 = 0, ulong2 = 0;
58642+ struct dentry *dentry = NULL;
58643+ struct vfsmount *mnt = NULL;
58644+ struct file *file = NULL;
58645+ struct task_struct *task = NULL;
58646+ const struct cred *cred, *pcred;
58647+ va_list ap;
58648+
58649+ BEGIN_LOCKS(audit);
58650+ logtype = gr_log_start(audit);
58651+ if (logtype == FLOODING) {
58652+ END_LOCKS(audit);
58653+ return;
58654+ }
58655+ va_start(ap, argtypes);
58656+ switch (argtypes) {
58657+ case GR_TTYSNIFF:
58658+ task = va_arg(ap, struct task_struct *);
58659+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
58660+ break;
58661+ case GR_SYSCTL_HIDDEN:
58662+ str1 = va_arg(ap, char *);
58663+ gr_log_middle_varargs(audit, msg, result, str1);
58664+ break;
58665+ case GR_RBAC:
58666+ dentry = va_arg(ap, struct dentry *);
58667+ mnt = va_arg(ap, struct vfsmount *);
58668+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
58669+ break;
58670+ case GR_RBAC_STR:
58671+ dentry = va_arg(ap, struct dentry *);
58672+ mnt = va_arg(ap, struct vfsmount *);
58673+ str1 = va_arg(ap, char *);
58674+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
58675+ break;
58676+ case GR_STR_RBAC:
58677+ str1 = va_arg(ap, char *);
58678+ dentry = va_arg(ap, struct dentry *);
58679+ mnt = va_arg(ap, struct vfsmount *);
58680+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
58681+ break;
58682+ case GR_RBAC_MODE2:
58683+ dentry = va_arg(ap, struct dentry *);
58684+ mnt = va_arg(ap, struct vfsmount *);
58685+ str1 = va_arg(ap, char *);
58686+ str2 = va_arg(ap, char *);
58687+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
58688+ break;
58689+ case GR_RBAC_MODE3:
58690+ dentry = va_arg(ap, struct dentry *);
58691+ mnt = va_arg(ap, struct vfsmount *);
58692+ str1 = va_arg(ap, char *);
58693+ str2 = va_arg(ap, char *);
58694+ str3 = va_arg(ap, char *);
58695+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
58696+ break;
58697+ case GR_FILENAME:
58698+ dentry = va_arg(ap, struct dentry *);
58699+ mnt = va_arg(ap, struct vfsmount *);
58700+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
58701+ break;
58702+ case GR_STR_FILENAME:
58703+ str1 = va_arg(ap, char *);
58704+ dentry = va_arg(ap, struct dentry *);
58705+ mnt = va_arg(ap, struct vfsmount *);
58706+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
58707+ break;
58708+ case GR_FILENAME_STR:
58709+ dentry = va_arg(ap, struct dentry *);
58710+ mnt = va_arg(ap, struct vfsmount *);
58711+ str1 = va_arg(ap, char *);
58712+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58713+ break;
58714+ case GR_FILENAME_TWO_INT:
58715+ dentry = va_arg(ap, struct dentry *);
58716+ mnt = va_arg(ap, struct vfsmount *);
58717+ num1 = va_arg(ap, int);
58718+ num2 = va_arg(ap, int);
58719+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58720+ break;
58721+ case GR_FILENAME_TWO_INT_STR:
58722+ dentry = va_arg(ap, struct dentry *);
58723+ mnt = va_arg(ap, struct vfsmount *);
58724+ num1 = va_arg(ap, int);
58725+ num2 = va_arg(ap, int);
58726+ str1 = va_arg(ap, char *);
58727+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58728+ break;
58729+ case GR_TEXTREL:
58730+ file = va_arg(ap, struct file *);
58731+ ulong1 = va_arg(ap, unsigned long);
58732+ ulong2 = va_arg(ap, unsigned long);
58733+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58734+ break;
58735+ case GR_PTRACE:
58736+ task = va_arg(ap, struct task_struct *);
58737+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58738+ break;
58739+ case GR_RESOURCE:
58740+ task = va_arg(ap, struct task_struct *);
58741+ cred = __task_cred(task);
58742+ pcred = __task_cred(task->real_parent);
58743+ ulong1 = va_arg(ap, unsigned long);
58744+ str1 = va_arg(ap, char *);
58745+ ulong2 = va_arg(ap, unsigned long);
58746+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58747+ break;
58748+ case GR_CAP:
58749+ task = va_arg(ap, struct task_struct *);
58750+ cred = __task_cred(task);
58751+ pcred = __task_cred(task->real_parent);
58752+ str1 = va_arg(ap, char *);
58753+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58754+ break;
58755+ case GR_SIG:
58756+ str1 = va_arg(ap, char *);
58757+ voidptr = va_arg(ap, void *);
58758+ gr_log_middle_varargs(audit, msg, str1, voidptr);
58759+ break;
58760+ case GR_SIG2:
58761+ task = va_arg(ap, struct task_struct *);
58762+ cred = __task_cred(task);
58763+ pcred = __task_cred(task->real_parent);
58764+ num1 = va_arg(ap, int);
58765+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58766+ break;
58767+ case GR_CRASH1:
58768+ task = va_arg(ap, struct task_struct *);
58769+ cred = __task_cred(task);
58770+ pcred = __task_cred(task->real_parent);
58771+ ulong1 = va_arg(ap, unsigned long);
58772+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58773+ break;
58774+ case GR_CRASH2:
58775+ task = va_arg(ap, struct task_struct *);
58776+ cred = __task_cred(task);
58777+ pcred = __task_cred(task->real_parent);
58778+ ulong1 = va_arg(ap, unsigned long);
58779+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58780+ break;
58781+ case GR_RWXMAP:
58782+ file = va_arg(ap, struct file *);
58783+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58784+ break;
58785+ case GR_PSACCT:
58786+ {
58787+ unsigned int wday, cday;
58788+ __u8 whr, chr;
58789+ __u8 wmin, cmin;
58790+ __u8 wsec, csec;
58791+ char cur_tty[64] = { 0 };
58792+ char parent_tty[64] = { 0 };
58793+
58794+ task = va_arg(ap, struct task_struct *);
58795+ wday = va_arg(ap, unsigned int);
58796+ cday = va_arg(ap, unsigned int);
58797+ whr = va_arg(ap, int);
58798+ chr = va_arg(ap, int);
58799+ wmin = va_arg(ap, int);
58800+ cmin = va_arg(ap, int);
58801+ wsec = va_arg(ap, int);
58802+ csec = va_arg(ap, int);
58803+ ulong1 = va_arg(ap, unsigned long);
58804+ cred = __task_cred(task);
58805+ pcred = __task_cred(task->real_parent);
58806+
58807+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58808+ }
58809+ break;
58810+ default:
58811+ gr_log_middle(audit, msg, ap);
58812+ }
58813+ va_end(ap);
58814+ // these don't need DEFAULTSECARGS printed on the end
58815+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
58816+ gr_log_end(audit, 0);
58817+ else
58818+ gr_log_end(audit, 1);
58819+ END_LOCKS(audit);
58820+}
58821diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
58822new file mode 100644
58823index 0000000..f536303
58824--- /dev/null
58825+++ b/grsecurity/grsec_mem.c
58826@@ -0,0 +1,40 @@
58827+#include <linux/kernel.h>
58828+#include <linux/sched.h>
58829+#include <linux/mm.h>
58830+#include <linux/mman.h>
58831+#include <linux/grinternal.h>
58832+
58833+void
58834+gr_handle_ioperm(void)
58835+{
58836+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
58837+ return;
58838+}
58839+
58840+void
58841+gr_handle_iopl(void)
58842+{
58843+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
58844+ return;
58845+}
58846+
58847+void
58848+gr_handle_mem_readwrite(u64 from, u64 to)
58849+{
58850+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58851+ return;
58852+}
58853+
58854+void
58855+gr_handle_vm86(void)
58856+{
58857+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
58858+ return;
58859+}
58860+
58861+void
58862+gr_log_badprocpid(const char *entry)
58863+{
58864+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
58865+ return;
58866+}
58867diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
58868new file mode 100644
58869index 0000000..2131422
58870--- /dev/null
58871+++ b/grsecurity/grsec_mount.c
58872@@ -0,0 +1,62 @@
58873+#include <linux/kernel.h>
58874+#include <linux/sched.h>
58875+#include <linux/mount.h>
58876+#include <linux/grsecurity.h>
58877+#include <linux/grinternal.h>
58878+
58879+void
58880+gr_log_remount(const char *devname, const int retval)
58881+{
58882+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58883+ if (grsec_enable_mount && (retval >= 0))
58884+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
58885+#endif
58886+ return;
58887+}
58888+
58889+void
58890+gr_log_unmount(const char *devname, const int retval)
58891+{
58892+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58893+ if (grsec_enable_mount && (retval >= 0))
58894+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
58895+#endif
58896+ return;
58897+}
58898+
58899+void
58900+gr_log_mount(const char *from, const char *to, const int retval)
58901+{
58902+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58903+ if (grsec_enable_mount && (retval >= 0))
58904+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
58905+#endif
58906+ return;
58907+}
58908+
58909+int
58910+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
58911+{
58912+#ifdef CONFIG_GRKERNSEC_ROFS
58913+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
58914+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
58915+ return -EPERM;
58916+ } else
58917+ return 0;
58918+#endif
58919+ return 0;
58920+}
58921+
58922+int
58923+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
58924+{
58925+#ifdef CONFIG_GRKERNSEC_ROFS
58926+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
58927+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
58928+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
58929+ return -EPERM;
58930+ } else
58931+ return 0;
58932+#endif
58933+ return 0;
58934+}
58935diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
58936new file mode 100644
58937index 0000000..a3b12a0
58938--- /dev/null
58939+++ b/grsecurity/grsec_pax.c
58940@@ -0,0 +1,36 @@
58941+#include <linux/kernel.h>
58942+#include <linux/sched.h>
58943+#include <linux/mm.h>
58944+#include <linux/file.h>
58945+#include <linux/grinternal.h>
58946+#include <linux/grsecurity.h>
58947+
58948+void
58949+gr_log_textrel(struct vm_area_struct * vma)
58950+{
58951+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58952+ if (grsec_enable_audit_textrel)
58953+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
58954+#endif
58955+ return;
58956+}
58957+
58958+void
58959+gr_log_rwxmmap(struct file *file)
58960+{
58961+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58962+ if (grsec_enable_log_rwxmaps)
58963+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
58964+#endif
58965+ return;
58966+}
58967+
58968+void
58969+gr_log_rwxmprotect(struct file *file)
58970+{
58971+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58972+ if (grsec_enable_log_rwxmaps)
58973+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
58974+#endif
58975+ return;
58976+}
58977diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
58978new file mode 100644
58979index 0000000..f7f29aa
58980--- /dev/null
58981+++ b/grsecurity/grsec_ptrace.c
58982@@ -0,0 +1,30 @@
58983+#include <linux/kernel.h>
58984+#include <linux/sched.h>
58985+#include <linux/grinternal.h>
58986+#include <linux/security.h>
58987+
58988+void
58989+gr_audit_ptrace(struct task_struct *task)
58990+{
58991+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58992+ if (grsec_enable_audit_ptrace)
58993+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
58994+#endif
58995+ return;
58996+}
58997+
58998+int
58999+gr_ptrace_readexec(struct file *file, int unsafe_flags)
59000+{
59001+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
59002+ const struct dentry *dentry = file->f_path.dentry;
59003+ const struct vfsmount *mnt = file->f_path.mnt;
59004+
59005+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
59006+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
59007+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
59008+ return -EACCES;
59009+ }
59010+#endif
59011+ return 0;
59012+}
59013diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
59014new file mode 100644
59015index 0000000..b4ac94c
59016--- /dev/null
59017+++ b/grsecurity/grsec_sig.c
59018@@ -0,0 +1,209 @@
59019+#include <linux/kernel.h>
59020+#include <linux/sched.h>
59021+#include <linux/delay.h>
59022+#include <linux/grsecurity.h>
59023+#include <linux/grinternal.h>
59024+#include <linux/hardirq.h>
59025+
59026+char *signames[] = {
59027+ [SIGSEGV] = "Segmentation fault",
59028+ [SIGILL] = "Illegal instruction",
59029+ [SIGABRT] = "Abort",
59030+ [SIGBUS] = "Invalid alignment/Bus error"
59031+};
59032+
59033+void
59034+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
59035+{
59036+#ifdef CONFIG_GRKERNSEC_SIGNAL
59037+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
59038+ (sig == SIGABRT) || (sig == SIGBUS))) {
59039+ if (t->pid == current->pid) {
59040+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
59041+ } else {
59042+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
59043+ }
59044+ }
59045+#endif
59046+ return;
59047+}
59048+
59049+int
59050+gr_handle_signal(const struct task_struct *p, const int sig)
59051+{
59052+#ifdef CONFIG_GRKERNSEC
59053+ /* ignore the 0 signal for protected task checks */
59054+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
59055+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
59056+ return -EPERM;
59057+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
59058+ return -EPERM;
59059+ }
59060+#endif
59061+ return 0;
59062+}
59063+
59064+#ifdef CONFIG_GRKERNSEC
59065+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
59066+
59067+int gr_fake_force_sig(int sig, struct task_struct *t)
59068+{
59069+ unsigned long int flags;
59070+ int ret, blocked, ignored;
59071+ struct k_sigaction *action;
59072+
59073+ spin_lock_irqsave(&t->sighand->siglock, flags);
59074+ action = &t->sighand->action[sig-1];
59075+ ignored = action->sa.sa_handler == SIG_IGN;
59076+ blocked = sigismember(&t->blocked, sig);
59077+ if (blocked || ignored) {
59078+ action->sa.sa_handler = SIG_DFL;
59079+ if (blocked) {
59080+ sigdelset(&t->blocked, sig);
59081+ recalc_sigpending_and_wake(t);
59082+ }
59083+ }
59084+ if (action->sa.sa_handler == SIG_DFL)
59085+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
59086+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
59087+
59088+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
59089+
59090+ return ret;
59091+}
59092+#endif
59093+
59094+#ifdef CONFIG_GRKERNSEC_BRUTE
59095+#define GR_USER_BAN_TIME (15 * 60)
59096+
59097+static int __get_dumpable(unsigned long mm_flags)
59098+{
59099+ int ret;
59100+
59101+ ret = mm_flags & MMF_DUMPABLE_MASK;
59102+ return (ret >= 2) ? 2 : ret;
59103+}
59104+#endif
59105+
59106+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
59107+{
59108+#ifdef CONFIG_GRKERNSEC_BRUTE
59109+ kuid_t uid = GLOBAL_ROOT_UID;
59110+
59111+ if (!grsec_enable_brute)
59112+ return;
59113+
59114+ rcu_read_lock();
59115+ read_lock(&tasklist_lock);
59116+ read_lock(&grsec_exec_file_lock);
59117+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
59118+ p->real_parent->brute = 1;
59119+ else {
59120+ const struct cred *cred = __task_cred(p), *cred2;
59121+ struct task_struct *tsk, *tsk2;
59122+
59123+ if (!__get_dumpable(mm_flags) && !uid_eq(cred->uid, GLOBAL_ROOT_UID)) {
59124+ struct user_struct *user;
59125+
59126+ uid = cred->uid;
59127+
59128+ /* this is put upon execution past expiration */
59129+ user = find_user(uid);
59130+ if (user == NULL)
59131+ goto unlock;
59132+ user->banned = 1;
59133+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
59134+ if (user->ban_expires == ~0UL)
59135+ user->ban_expires--;
59136+
59137+ do_each_thread(tsk2, tsk) {
59138+ cred2 = __task_cred(tsk);
59139+ if (tsk != p && uid_eq(cred2->uid, uid))
59140+ gr_fake_force_sig(SIGKILL, tsk);
59141+ } while_each_thread(tsk2, tsk);
59142+ }
59143+ }
59144+unlock:
59145+ read_unlock(&grsec_exec_file_lock);
59146+ read_unlock(&tasklist_lock);
59147+ rcu_read_unlock();
59148+
59149+ if (!uid_eq(uid, GLOBAL_ROOT_UID))
59150+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
59151+ from_kuid_munged(&init_user_ns, uid), GR_USER_BAN_TIME / 60);
59152+
59153+#endif
59154+ return;
59155+}
59156+
59157+void gr_handle_brute_check(void)
59158+{
59159+#ifdef CONFIG_GRKERNSEC_BRUTE
59160+ if (current->brute)
59161+ msleep(30 * 1000);
59162+#endif
59163+ return;
59164+}
59165+
59166+void gr_handle_kernel_exploit(void)
59167+{
59168+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
59169+ const struct cred *cred;
59170+ struct task_struct *tsk, *tsk2;
59171+ struct user_struct *user;
59172+ kuid_t uid;
59173+
59174+ if (in_irq() || in_serving_softirq() || in_nmi())
59175+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
59176+
59177+ uid = current_uid();
59178+
59179+ if (uid_eq(uid, GLOBAL_ROOT_UID))
59180+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
59181+ else {
59182+ /* kill all the processes of this user, hold a reference
59183+ to their creds struct, and prevent them from creating
59184+ another process until system reset
59185+ */
59186+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
59187+ from_kuid_munged(&init_user_ns, uid));
59188+ /* we intentionally leak this ref */
59189+ user = get_uid(current->cred->user);
59190+ if (user) {
59191+ user->banned = 1;
59192+ user->ban_expires = ~0UL;
59193+ }
59194+
59195+ read_lock(&tasklist_lock);
59196+ do_each_thread(tsk2, tsk) {
59197+ cred = __task_cred(tsk);
59198+ if (uid_eq(cred->uid, uid))
59199+ gr_fake_force_sig(SIGKILL, tsk);
59200+ } while_each_thread(tsk2, tsk);
59201+ read_unlock(&tasklist_lock);
59202+ }
59203+#endif
59204+}
59205+
59206+int __gr_process_user_ban(struct user_struct *user)
59207+{
59208+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59209+ if (unlikely(user->banned)) {
59210+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
59211+ user->banned = 0;
59212+ user->ban_expires = 0;
59213+ free_uid(user);
59214+ } else
59215+ return -EPERM;
59216+ }
59217+#endif
59218+ return 0;
59219+}
59220+
59221+int gr_process_user_ban(void)
59222+{
59223+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59224+ return __gr_process_user_ban(current->cred->user);
59225+#endif
59226+ return 0;
59227+}
59228diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
59229new file mode 100644
59230index 0000000..4030d57
59231--- /dev/null
59232+++ b/grsecurity/grsec_sock.c
59233@@ -0,0 +1,244 @@
59234+#include <linux/kernel.h>
59235+#include <linux/module.h>
59236+#include <linux/sched.h>
59237+#include <linux/file.h>
59238+#include <linux/net.h>
59239+#include <linux/in.h>
59240+#include <linux/ip.h>
59241+#include <net/sock.h>
59242+#include <net/inet_sock.h>
59243+#include <linux/grsecurity.h>
59244+#include <linux/grinternal.h>
59245+#include <linux/gracl.h>
59246+
59247+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
59248+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
59249+
59250+EXPORT_SYMBOL(gr_search_udp_recvmsg);
59251+EXPORT_SYMBOL(gr_search_udp_sendmsg);
59252+
59253+#ifdef CONFIG_UNIX_MODULE
59254+EXPORT_SYMBOL(gr_acl_handle_unix);
59255+EXPORT_SYMBOL(gr_acl_handle_mknod);
59256+EXPORT_SYMBOL(gr_handle_chroot_unix);
59257+EXPORT_SYMBOL(gr_handle_create);
59258+#endif
59259+
59260+#ifdef CONFIG_GRKERNSEC
59261+#define gr_conn_table_size 32749
59262+struct conn_table_entry {
59263+ struct conn_table_entry *next;
59264+ struct signal_struct *sig;
59265+};
59266+
59267+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
59268+DEFINE_SPINLOCK(gr_conn_table_lock);
59269+
59270+extern const char * gr_socktype_to_name(unsigned char type);
59271+extern const char * gr_proto_to_name(unsigned char proto);
59272+extern const char * gr_sockfamily_to_name(unsigned char family);
59273+
59274+static __inline__ int
59275+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
59276+{
59277+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
59278+}
59279+
59280+static __inline__ int
59281+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
59282+ __u16 sport, __u16 dport)
59283+{
59284+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
59285+ sig->gr_sport == sport && sig->gr_dport == dport))
59286+ return 1;
59287+ else
59288+ return 0;
59289+}
59290+
59291+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
59292+{
59293+ struct conn_table_entry **match;
59294+ unsigned int index;
59295+
59296+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
59297+ sig->gr_sport, sig->gr_dport,
59298+ gr_conn_table_size);
59299+
59300+ newent->sig = sig;
59301+
59302+ match = &gr_conn_table[index];
59303+ newent->next = *match;
59304+ *match = newent;
59305+
59306+ return;
59307+}
59308+
59309+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
59310+{
59311+ struct conn_table_entry *match, *last = NULL;
59312+ unsigned int index;
59313+
59314+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
59315+ sig->gr_sport, sig->gr_dport,
59316+ gr_conn_table_size);
59317+
59318+ match = gr_conn_table[index];
59319+ while (match && !conn_match(match->sig,
59320+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
59321+ sig->gr_dport)) {
59322+ last = match;
59323+ match = match->next;
59324+ }
59325+
59326+ if (match) {
59327+ if (last)
59328+ last->next = match->next;
59329+ else
59330+ gr_conn_table[index] = NULL;
59331+ kfree(match);
59332+ }
59333+
59334+ return;
59335+}
59336+
59337+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
59338+ __u16 sport, __u16 dport)
59339+{
59340+ struct conn_table_entry *match;
59341+ unsigned int index;
59342+
59343+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
59344+
59345+ match = gr_conn_table[index];
59346+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
59347+ match = match->next;
59348+
59349+ if (match)
59350+ return match->sig;
59351+ else
59352+ return NULL;
59353+}
59354+
59355+#endif
59356+
59357+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
59358+{
59359+#ifdef CONFIG_GRKERNSEC
59360+ struct signal_struct *sig = task->signal;
59361+ struct conn_table_entry *newent;
59362+
59363+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
59364+ if (newent == NULL)
59365+ return;
59366+ /* no bh lock needed since we are called with bh disabled */
59367+ spin_lock(&gr_conn_table_lock);
59368+ gr_del_task_from_ip_table_nolock(sig);
59369+ sig->gr_saddr = inet->inet_rcv_saddr;
59370+ sig->gr_daddr = inet->inet_daddr;
59371+ sig->gr_sport = inet->inet_sport;
59372+ sig->gr_dport = inet->inet_dport;
59373+ gr_add_to_task_ip_table_nolock(sig, newent);
59374+ spin_unlock(&gr_conn_table_lock);
59375+#endif
59376+ return;
59377+}
59378+
59379+void gr_del_task_from_ip_table(struct task_struct *task)
59380+{
59381+#ifdef CONFIG_GRKERNSEC
59382+ spin_lock_bh(&gr_conn_table_lock);
59383+ gr_del_task_from_ip_table_nolock(task->signal);
59384+ spin_unlock_bh(&gr_conn_table_lock);
59385+#endif
59386+ return;
59387+}
59388+
59389+void
59390+gr_attach_curr_ip(const struct sock *sk)
59391+{
59392+#ifdef CONFIG_GRKERNSEC
59393+ struct signal_struct *p, *set;
59394+ const struct inet_sock *inet = inet_sk(sk);
59395+
59396+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
59397+ return;
59398+
59399+ set = current->signal;
59400+
59401+ spin_lock_bh(&gr_conn_table_lock);
59402+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
59403+ inet->inet_dport, inet->inet_sport);
59404+ if (unlikely(p != NULL)) {
59405+ set->curr_ip = p->curr_ip;
59406+ set->used_accept = 1;
59407+ gr_del_task_from_ip_table_nolock(p);
59408+ spin_unlock_bh(&gr_conn_table_lock);
59409+ return;
59410+ }
59411+ spin_unlock_bh(&gr_conn_table_lock);
59412+
59413+ set->curr_ip = inet->inet_daddr;
59414+ set->used_accept = 1;
59415+#endif
59416+ return;
59417+}
59418+
59419+int
59420+gr_handle_sock_all(const int family, const int type, const int protocol)
59421+{
59422+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59423+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
59424+ (family != AF_UNIX)) {
59425+ if (family == AF_INET)
59426+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
59427+ else
59428+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
59429+ return -EACCES;
59430+ }
59431+#endif
59432+ return 0;
59433+}
59434+
59435+int
59436+gr_handle_sock_server(const struct sockaddr *sck)
59437+{
59438+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59439+ if (grsec_enable_socket_server &&
59440+ in_group_p(grsec_socket_server_gid) &&
59441+ sck && (sck->sa_family != AF_UNIX) &&
59442+ (sck->sa_family != AF_LOCAL)) {
59443+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59444+ return -EACCES;
59445+ }
59446+#endif
59447+ return 0;
59448+}
59449+
59450+int
59451+gr_handle_sock_server_other(const struct sock *sck)
59452+{
59453+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59454+ if (grsec_enable_socket_server &&
59455+ in_group_p(grsec_socket_server_gid) &&
59456+ sck && (sck->sk_family != AF_UNIX) &&
59457+ (sck->sk_family != AF_LOCAL)) {
59458+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59459+ return -EACCES;
59460+ }
59461+#endif
59462+ return 0;
59463+}
59464+
59465+int
59466+gr_handle_sock_client(const struct sockaddr *sck)
59467+{
59468+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59469+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
59470+ sck && (sck->sa_family != AF_UNIX) &&
59471+ (sck->sa_family != AF_LOCAL)) {
59472+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
59473+ return -EACCES;
59474+ }
59475+#endif
59476+ return 0;
59477+}
59478diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
59479new file mode 100644
59480index 0000000..f55ef0f
59481--- /dev/null
59482+++ b/grsecurity/grsec_sysctl.c
59483@@ -0,0 +1,469 @@
59484+#include <linux/kernel.h>
59485+#include <linux/sched.h>
59486+#include <linux/sysctl.h>
59487+#include <linux/grsecurity.h>
59488+#include <linux/grinternal.h>
59489+
59490+int
59491+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
59492+{
59493+#ifdef CONFIG_GRKERNSEC_SYSCTL
59494+ if (dirname == NULL || name == NULL)
59495+ return 0;
59496+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
59497+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
59498+ return -EACCES;
59499+ }
59500+#endif
59501+ return 0;
59502+}
59503+
59504+#ifdef CONFIG_GRKERNSEC_ROFS
59505+static int __maybe_unused one = 1;
59506+#endif
59507+
59508+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59509+struct ctl_table grsecurity_table[] = {
59510+#ifdef CONFIG_GRKERNSEC_SYSCTL
59511+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
59512+#ifdef CONFIG_GRKERNSEC_IO
59513+ {
59514+ .procname = "disable_priv_io",
59515+ .data = &grsec_disable_privio,
59516+ .maxlen = sizeof(int),
59517+ .mode = 0600,
59518+ .proc_handler = &proc_dointvec,
59519+ },
59520+#endif
59521+#endif
59522+#ifdef CONFIG_GRKERNSEC_LINK
59523+ {
59524+ .procname = "linking_restrictions",
59525+ .data = &grsec_enable_link,
59526+ .maxlen = sizeof(int),
59527+ .mode = 0600,
59528+ .proc_handler = &proc_dointvec,
59529+ },
59530+#endif
59531+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
59532+ {
59533+ .procname = "enforce_symlinksifowner",
59534+ .data = &grsec_enable_symlinkown,
59535+ .maxlen = sizeof(int),
59536+ .mode = 0600,
59537+ .proc_handler = &proc_dointvec,
59538+ },
59539+ {
59540+ .procname = "symlinkown_gid",
59541+ .data = &grsec_symlinkown_gid,
59542+ .maxlen = sizeof(int),
59543+ .mode = 0600,
59544+ .proc_handler = &proc_dointvec,
59545+ },
59546+#endif
59547+#ifdef CONFIG_GRKERNSEC_BRUTE
59548+ {
59549+ .procname = "deter_bruteforce",
59550+ .data = &grsec_enable_brute,
59551+ .maxlen = sizeof(int),
59552+ .mode = 0600,
59553+ .proc_handler = &proc_dointvec,
59554+ },
59555+#endif
59556+#ifdef CONFIG_GRKERNSEC_FIFO
59557+ {
59558+ .procname = "fifo_restrictions",
59559+ .data = &grsec_enable_fifo,
59560+ .maxlen = sizeof(int),
59561+ .mode = 0600,
59562+ .proc_handler = &proc_dointvec,
59563+ },
59564+#endif
59565+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
59566+ {
59567+ .procname = "ptrace_readexec",
59568+ .data = &grsec_enable_ptrace_readexec,
59569+ .maxlen = sizeof(int),
59570+ .mode = 0600,
59571+ .proc_handler = &proc_dointvec,
59572+ },
59573+#endif
59574+#ifdef CONFIG_GRKERNSEC_SETXID
59575+ {
59576+ .procname = "consistent_setxid",
59577+ .data = &grsec_enable_setxid,
59578+ .maxlen = sizeof(int),
59579+ .mode = 0600,
59580+ .proc_handler = &proc_dointvec,
59581+ },
59582+#endif
59583+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
59584+ {
59585+ .procname = "ip_blackhole",
59586+ .data = &grsec_enable_blackhole,
59587+ .maxlen = sizeof(int),
59588+ .mode = 0600,
59589+ .proc_handler = &proc_dointvec,
59590+ },
59591+ {
59592+ .procname = "lastack_retries",
59593+ .data = &grsec_lastack_retries,
59594+ .maxlen = sizeof(int),
59595+ .mode = 0600,
59596+ .proc_handler = &proc_dointvec,
59597+ },
59598+#endif
59599+#ifdef CONFIG_GRKERNSEC_EXECLOG
59600+ {
59601+ .procname = "exec_logging",
59602+ .data = &grsec_enable_execlog,
59603+ .maxlen = sizeof(int),
59604+ .mode = 0600,
59605+ .proc_handler = &proc_dointvec,
59606+ },
59607+#endif
59608+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59609+ {
59610+ .procname = "rwxmap_logging",
59611+ .data = &grsec_enable_log_rwxmaps,
59612+ .maxlen = sizeof(int),
59613+ .mode = 0600,
59614+ .proc_handler = &proc_dointvec,
59615+ },
59616+#endif
59617+#ifdef CONFIG_GRKERNSEC_SIGNAL
59618+ {
59619+ .procname = "signal_logging",
59620+ .data = &grsec_enable_signal,
59621+ .maxlen = sizeof(int),
59622+ .mode = 0600,
59623+ .proc_handler = &proc_dointvec,
59624+ },
59625+#endif
59626+#ifdef CONFIG_GRKERNSEC_FORKFAIL
59627+ {
59628+ .procname = "forkfail_logging",
59629+ .data = &grsec_enable_forkfail,
59630+ .maxlen = sizeof(int),
59631+ .mode = 0600,
59632+ .proc_handler = &proc_dointvec,
59633+ },
59634+#endif
59635+#ifdef CONFIG_GRKERNSEC_TIME
59636+ {
59637+ .procname = "timechange_logging",
59638+ .data = &grsec_enable_time,
59639+ .maxlen = sizeof(int),
59640+ .mode = 0600,
59641+ .proc_handler = &proc_dointvec,
59642+ },
59643+#endif
59644+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
59645+ {
59646+ .procname = "chroot_deny_shmat",
59647+ .data = &grsec_enable_chroot_shmat,
59648+ .maxlen = sizeof(int),
59649+ .mode = 0600,
59650+ .proc_handler = &proc_dointvec,
59651+ },
59652+#endif
59653+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
59654+ {
59655+ .procname = "chroot_deny_unix",
59656+ .data = &grsec_enable_chroot_unix,
59657+ .maxlen = sizeof(int),
59658+ .mode = 0600,
59659+ .proc_handler = &proc_dointvec,
59660+ },
59661+#endif
59662+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
59663+ {
59664+ .procname = "chroot_deny_mount",
59665+ .data = &grsec_enable_chroot_mount,
59666+ .maxlen = sizeof(int),
59667+ .mode = 0600,
59668+ .proc_handler = &proc_dointvec,
59669+ },
59670+#endif
59671+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59672+ {
59673+ .procname = "chroot_deny_fchdir",
59674+ .data = &grsec_enable_chroot_fchdir,
59675+ .maxlen = sizeof(int),
59676+ .mode = 0600,
59677+ .proc_handler = &proc_dointvec,
59678+ },
59679+#endif
59680+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59681+ {
59682+ .procname = "chroot_deny_chroot",
59683+ .data = &grsec_enable_chroot_double,
59684+ .maxlen = sizeof(int),
59685+ .mode = 0600,
59686+ .proc_handler = &proc_dointvec,
59687+ },
59688+#endif
59689+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59690+ {
59691+ .procname = "chroot_deny_pivot",
59692+ .data = &grsec_enable_chroot_pivot,
59693+ .maxlen = sizeof(int),
59694+ .mode = 0600,
59695+ .proc_handler = &proc_dointvec,
59696+ },
59697+#endif
59698+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59699+ {
59700+ .procname = "chroot_enforce_chdir",
59701+ .data = &grsec_enable_chroot_chdir,
59702+ .maxlen = sizeof(int),
59703+ .mode = 0600,
59704+ .proc_handler = &proc_dointvec,
59705+ },
59706+#endif
59707+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59708+ {
59709+ .procname = "chroot_deny_chmod",
59710+ .data = &grsec_enable_chroot_chmod,
59711+ .maxlen = sizeof(int),
59712+ .mode = 0600,
59713+ .proc_handler = &proc_dointvec,
59714+ },
59715+#endif
59716+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59717+ {
59718+ .procname = "chroot_deny_mknod",
59719+ .data = &grsec_enable_chroot_mknod,
59720+ .maxlen = sizeof(int),
59721+ .mode = 0600,
59722+ .proc_handler = &proc_dointvec,
59723+ },
59724+#endif
59725+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59726+ {
59727+ .procname = "chroot_restrict_nice",
59728+ .data = &grsec_enable_chroot_nice,
59729+ .maxlen = sizeof(int),
59730+ .mode = 0600,
59731+ .proc_handler = &proc_dointvec,
59732+ },
59733+#endif
59734+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59735+ {
59736+ .procname = "chroot_execlog",
59737+ .data = &grsec_enable_chroot_execlog,
59738+ .maxlen = sizeof(int),
59739+ .mode = 0600,
59740+ .proc_handler = &proc_dointvec,
59741+ },
59742+#endif
59743+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59744+ {
59745+ .procname = "chroot_caps",
59746+ .data = &grsec_enable_chroot_caps,
59747+ .maxlen = sizeof(int),
59748+ .mode = 0600,
59749+ .proc_handler = &proc_dointvec,
59750+ },
59751+#endif
59752+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59753+ {
59754+ .procname = "chroot_deny_sysctl",
59755+ .data = &grsec_enable_chroot_sysctl,
59756+ .maxlen = sizeof(int),
59757+ .mode = 0600,
59758+ .proc_handler = &proc_dointvec,
59759+ },
59760+#endif
59761+#ifdef CONFIG_GRKERNSEC_TPE
59762+ {
59763+ .procname = "tpe",
59764+ .data = &grsec_enable_tpe,
59765+ .maxlen = sizeof(int),
59766+ .mode = 0600,
59767+ .proc_handler = &proc_dointvec,
59768+ },
59769+ {
59770+ .procname = "tpe_gid",
59771+ .data = &grsec_tpe_gid,
59772+ .maxlen = sizeof(int),
59773+ .mode = 0600,
59774+ .proc_handler = &proc_dointvec,
59775+ },
59776+#endif
59777+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59778+ {
59779+ .procname = "tpe_invert",
59780+ .data = &grsec_enable_tpe_invert,
59781+ .maxlen = sizeof(int),
59782+ .mode = 0600,
59783+ .proc_handler = &proc_dointvec,
59784+ },
59785+#endif
59786+#ifdef CONFIG_GRKERNSEC_TPE_ALL
59787+ {
59788+ .procname = "tpe_restrict_all",
59789+ .data = &grsec_enable_tpe_all,
59790+ .maxlen = sizeof(int),
59791+ .mode = 0600,
59792+ .proc_handler = &proc_dointvec,
59793+ },
59794+#endif
59795+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59796+ {
59797+ .procname = "socket_all",
59798+ .data = &grsec_enable_socket_all,
59799+ .maxlen = sizeof(int),
59800+ .mode = 0600,
59801+ .proc_handler = &proc_dointvec,
59802+ },
59803+ {
59804+ .procname = "socket_all_gid",
59805+ .data = &grsec_socket_all_gid,
59806+ .maxlen = sizeof(int),
59807+ .mode = 0600,
59808+ .proc_handler = &proc_dointvec,
59809+ },
59810+#endif
59811+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59812+ {
59813+ .procname = "socket_client",
59814+ .data = &grsec_enable_socket_client,
59815+ .maxlen = sizeof(int),
59816+ .mode = 0600,
59817+ .proc_handler = &proc_dointvec,
59818+ },
59819+ {
59820+ .procname = "socket_client_gid",
59821+ .data = &grsec_socket_client_gid,
59822+ .maxlen = sizeof(int),
59823+ .mode = 0600,
59824+ .proc_handler = &proc_dointvec,
59825+ },
59826+#endif
59827+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59828+ {
59829+ .procname = "socket_server",
59830+ .data = &grsec_enable_socket_server,
59831+ .maxlen = sizeof(int),
59832+ .mode = 0600,
59833+ .proc_handler = &proc_dointvec,
59834+ },
59835+ {
59836+ .procname = "socket_server_gid",
59837+ .data = &grsec_socket_server_gid,
59838+ .maxlen = sizeof(int),
59839+ .mode = 0600,
59840+ .proc_handler = &proc_dointvec,
59841+ },
59842+#endif
59843+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
59844+ {
59845+ .procname = "audit_group",
59846+ .data = &grsec_enable_group,
59847+ .maxlen = sizeof(int),
59848+ .mode = 0600,
59849+ .proc_handler = &proc_dointvec,
59850+ },
59851+ {
59852+ .procname = "audit_gid",
59853+ .data = &grsec_audit_gid,
59854+ .maxlen = sizeof(int),
59855+ .mode = 0600,
59856+ .proc_handler = &proc_dointvec,
59857+ },
59858+#endif
59859+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59860+ {
59861+ .procname = "audit_chdir",
59862+ .data = &grsec_enable_chdir,
59863+ .maxlen = sizeof(int),
59864+ .mode = 0600,
59865+ .proc_handler = &proc_dointvec,
59866+ },
59867+#endif
59868+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59869+ {
59870+ .procname = "audit_mount",
59871+ .data = &grsec_enable_mount,
59872+ .maxlen = sizeof(int),
59873+ .mode = 0600,
59874+ .proc_handler = &proc_dointvec,
59875+ },
59876+#endif
59877+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59878+ {
59879+ .procname = "audit_textrel",
59880+ .data = &grsec_enable_audit_textrel,
59881+ .maxlen = sizeof(int),
59882+ .mode = 0600,
59883+ .proc_handler = &proc_dointvec,
59884+ },
59885+#endif
59886+#ifdef CONFIG_GRKERNSEC_DMESG
59887+ {
59888+ .procname = "dmesg",
59889+ .data = &grsec_enable_dmesg,
59890+ .maxlen = sizeof(int),
59891+ .mode = 0600,
59892+ .proc_handler = &proc_dointvec,
59893+ },
59894+#endif
59895+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59896+ {
59897+ .procname = "chroot_findtask",
59898+ .data = &grsec_enable_chroot_findtask,
59899+ .maxlen = sizeof(int),
59900+ .mode = 0600,
59901+ .proc_handler = &proc_dointvec,
59902+ },
59903+#endif
59904+#ifdef CONFIG_GRKERNSEC_RESLOG
59905+ {
59906+ .procname = "resource_logging",
59907+ .data = &grsec_resource_logging,
59908+ .maxlen = sizeof(int),
59909+ .mode = 0600,
59910+ .proc_handler = &proc_dointvec,
59911+ },
59912+#endif
59913+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59914+ {
59915+ .procname = "audit_ptrace",
59916+ .data = &grsec_enable_audit_ptrace,
59917+ .maxlen = sizeof(int),
59918+ .mode = 0600,
59919+ .proc_handler = &proc_dointvec,
59920+ },
59921+#endif
59922+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59923+ {
59924+ .procname = "harden_ptrace",
59925+ .data = &grsec_enable_harden_ptrace,
59926+ .maxlen = sizeof(int),
59927+ .mode = 0600,
59928+ .proc_handler = &proc_dointvec,
59929+ },
59930+#endif
59931+ {
59932+ .procname = "grsec_lock",
59933+ .data = &grsec_lock,
59934+ .maxlen = sizeof(int),
59935+ .mode = 0600,
59936+ .proc_handler = &proc_dointvec,
59937+ },
59938+#endif
59939+#ifdef CONFIG_GRKERNSEC_ROFS
59940+ {
59941+ .procname = "romount_protect",
59942+ .data = &grsec_enable_rofs,
59943+ .maxlen = sizeof(int),
59944+ .mode = 0600,
59945+ .proc_handler = &proc_dointvec_minmax,
59946+ .extra1 = &one,
59947+ .extra2 = &one,
59948+ },
59949+#endif
59950+ { }
59951+};
59952+#endif
59953diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
59954new file mode 100644
59955index 0000000..0dc13c3
59956--- /dev/null
59957+++ b/grsecurity/grsec_time.c
59958@@ -0,0 +1,16 @@
59959+#include <linux/kernel.h>
59960+#include <linux/sched.h>
59961+#include <linux/grinternal.h>
59962+#include <linux/module.h>
59963+
59964+void
59965+gr_log_timechange(void)
59966+{
59967+#ifdef CONFIG_GRKERNSEC_TIME
59968+ if (grsec_enable_time)
59969+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
59970+#endif
59971+ return;
59972+}
59973+
59974+EXPORT_SYMBOL(gr_log_timechange);
59975diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
59976new file mode 100644
59977index 0000000..07e0dc0
59978--- /dev/null
59979+++ b/grsecurity/grsec_tpe.c
59980@@ -0,0 +1,73 @@
59981+#include <linux/kernel.h>
59982+#include <linux/sched.h>
59983+#include <linux/file.h>
59984+#include <linux/fs.h>
59985+#include <linux/grinternal.h>
59986+
59987+extern int gr_acl_tpe_check(void);
59988+
59989+int
59990+gr_tpe_allow(const struct file *file)
59991+{
59992+#ifdef CONFIG_GRKERNSEC
59993+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
59994+ const struct cred *cred = current_cred();
59995+ char *msg = NULL;
59996+ char *msg2 = NULL;
59997+
59998+ // never restrict root
59999+ if (!cred->uid)
60000+ return 1;
60001+
60002+ if (grsec_enable_tpe) {
60003+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
60004+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
60005+ msg = "not being in trusted group";
60006+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
60007+ msg = "being in untrusted group";
60008+#else
60009+ if (in_group_p(grsec_tpe_gid))
60010+ msg = "being in untrusted group";
60011+#endif
60012+ }
60013+ if (!msg && gr_acl_tpe_check())
60014+ msg = "being in untrusted role";
60015+
60016+ // not in any affected group/role
60017+ if (!msg)
60018+ goto next_check;
60019+
60020+ if (inode->i_uid)
60021+ msg2 = "file in non-root-owned directory";
60022+ else if (inode->i_mode & S_IWOTH)
60023+ msg2 = "file in world-writable directory";
60024+ else if (inode->i_mode & S_IWGRP)
60025+ msg2 = "file in group-writable directory";
60026+
60027+ if (msg && msg2) {
60028+ char fullmsg[70] = {0};
60029+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
60030+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
60031+ return 0;
60032+ }
60033+ msg = NULL;
60034+next_check:
60035+#ifdef CONFIG_GRKERNSEC_TPE_ALL
60036+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
60037+ return 1;
60038+
60039+ if (inode->i_uid && (inode->i_uid != cred->uid))
60040+ msg = "directory not owned by user";
60041+ else if (inode->i_mode & S_IWOTH)
60042+ msg = "file in world-writable directory";
60043+ else if (inode->i_mode & S_IWGRP)
60044+ msg = "file in group-writable directory";
60045+
60046+ if (msg) {
60047+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
60048+ return 0;
60049+ }
60050+#endif
60051+#endif
60052+ return 1;
60053+}
60054diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
60055new file mode 100644
60056index 0000000..9f7b1ac
60057--- /dev/null
60058+++ b/grsecurity/grsum.c
60059@@ -0,0 +1,61 @@
60060+#include <linux/err.h>
60061+#include <linux/kernel.h>
60062+#include <linux/sched.h>
60063+#include <linux/mm.h>
60064+#include <linux/scatterlist.h>
60065+#include <linux/crypto.h>
60066+#include <linux/gracl.h>
60067+
60068+
60069+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
60070+#error "crypto and sha256 must be built into the kernel"
60071+#endif
60072+
60073+int
60074+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
60075+{
60076+ char *p;
60077+ struct crypto_hash *tfm;
60078+ struct hash_desc desc;
60079+ struct scatterlist sg;
60080+ unsigned char temp_sum[GR_SHA_LEN];
60081+ volatile int retval = 0;
60082+ volatile int dummy = 0;
60083+ unsigned int i;
60084+
60085+ sg_init_table(&sg, 1);
60086+
60087+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
60088+ if (IS_ERR(tfm)) {
60089+ /* should never happen, since sha256 should be built in */
60090+ return 1;
60091+ }
60092+
60093+ desc.tfm = tfm;
60094+ desc.flags = 0;
60095+
60096+ crypto_hash_init(&desc);
60097+
60098+ p = salt;
60099+ sg_set_buf(&sg, p, GR_SALT_LEN);
60100+ crypto_hash_update(&desc, &sg, sg.length);
60101+
60102+ p = entry->pw;
60103+ sg_set_buf(&sg, p, strlen(p));
60104+
60105+ crypto_hash_update(&desc, &sg, sg.length);
60106+
60107+ crypto_hash_final(&desc, temp_sum);
60108+
60109+ memset(entry->pw, 0, GR_PW_LEN);
60110+
60111+ for (i = 0; i < GR_SHA_LEN; i++)
60112+ if (sum[i] != temp_sum[i])
60113+ retval = 1;
60114+ else
60115+ dummy = 1; // waste a cycle
60116+
60117+ crypto_free_hash(tfm);
60118+
60119+ return retval;
60120+}
60121diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
60122index 9e6e1c6..d47b906 100644
60123--- a/include/acpi/acpi_bus.h
60124+++ b/include/acpi/acpi_bus.h
60125@@ -138,7 +138,7 @@ struct acpi_device_ops {
60126 acpi_op_bind bind;
60127 acpi_op_unbind unbind;
60128 acpi_op_notify notify;
60129-};
60130+} __no_const;
60131
60132 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
60133
60134diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
60135index 77ff547..181834f 100644
60136--- a/include/asm-generic/4level-fixup.h
60137+++ b/include/asm-generic/4level-fixup.h
60138@@ -13,8 +13,10 @@
60139 #define pmd_alloc(mm, pud, address) \
60140 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
60141 NULL: pmd_offset(pud, address))
60142+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
60143
60144 #define pud_alloc(mm, pgd, address) (pgd)
60145+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
60146 #define pud_offset(pgd, start) (pgd)
60147 #define pud_none(pud) 0
60148 #define pud_bad(pud) 0
60149diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
60150index b7babf0..3ba8aee 100644
60151--- a/include/asm-generic/atomic-long.h
60152+++ b/include/asm-generic/atomic-long.h
60153@@ -22,6 +22,12 @@
60154
60155 typedef atomic64_t atomic_long_t;
60156
60157+#ifdef CONFIG_PAX_REFCOUNT
60158+typedef atomic64_unchecked_t atomic_long_unchecked_t;
60159+#else
60160+typedef atomic64_t atomic_long_unchecked_t;
60161+#endif
60162+
60163 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
60164
60165 static inline long atomic_long_read(atomic_long_t *l)
60166@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
60167 return (long)atomic64_read(v);
60168 }
60169
60170+#ifdef CONFIG_PAX_REFCOUNT
60171+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60172+{
60173+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60174+
60175+ return (long)atomic64_read_unchecked(v);
60176+}
60177+#endif
60178+
60179 static inline void atomic_long_set(atomic_long_t *l, long i)
60180 {
60181 atomic64_t *v = (atomic64_t *)l;
60182@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
60183 atomic64_set(v, i);
60184 }
60185
60186+#ifdef CONFIG_PAX_REFCOUNT
60187+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60188+{
60189+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60190+
60191+ atomic64_set_unchecked(v, i);
60192+}
60193+#endif
60194+
60195 static inline void atomic_long_inc(atomic_long_t *l)
60196 {
60197 atomic64_t *v = (atomic64_t *)l;
60198@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
60199 atomic64_inc(v);
60200 }
60201
60202+#ifdef CONFIG_PAX_REFCOUNT
60203+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60204+{
60205+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60206+
60207+ atomic64_inc_unchecked(v);
60208+}
60209+#endif
60210+
60211 static inline void atomic_long_dec(atomic_long_t *l)
60212 {
60213 atomic64_t *v = (atomic64_t *)l;
60214@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
60215 atomic64_dec(v);
60216 }
60217
60218+#ifdef CONFIG_PAX_REFCOUNT
60219+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60220+{
60221+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60222+
60223+ atomic64_dec_unchecked(v);
60224+}
60225+#endif
60226+
60227 static inline void atomic_long_add(long i, atomic_long_t *l)
60228 {
60229 atomic64_t *v = (atomic64_t *)l;
60230@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
60231 atomic64_add(i, v);
60232 }
60233
60234+#ifdef CONFIG_PAX_REFCOUNT
60235+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60236+{
60237+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60238+
60239+ atomic64_add_unchecked(i, v);
60240+}
60241+#endif
60242+
60243 static inline void atomic_long_sub(long i, atomic_long_t *l)
60244 {
60245 atomic64_t *v = (atomic64_t *)l;
60246@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
60247 atomic64_sub(i, v);
60248 }
60249
60250+#ifdef CONFIG_PAX_REFCOUNT
60251+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
60252+{
60253+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60254+
60255+ atomic64_sub_unchecked(i, v);
60256+}
60257+#endif
60258+
60259 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
60260 {
60261 atomic64_t *v = (atomic64_t *)l;
60262@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
60263 return (long)atomic64_inc_return(v);
60264 }
60265
60266+#ifdef CONFIG_PAX_REFCOUNT
60267+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60268+{
60269+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60270+
60271+ return (long)atomic64_inc_return_unchecked(v);
60272+}
60273+#endif
60274+
60275 static inline long atomic_long_dec_return(atomic_long_t *l)
60276 {
60277 atomic64_t *v = (atomic64_t *)l;
60278@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
60279
60280 typedef atomic_t atomic_long_t;
60281
60282+#ifdef CONFIG_PAX_REFCOUNT
60283+typedef atomic_unchecked_t atomic_long_unchecked_t;
60284+#else
60285+typedef atomic_t atomic_long_unchecked_t;
60286+#endif
60287+
60288 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
60289 static inline long atomic_long_read(atomic_long_t *l)
60290 {
60291@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
60292 return (long)atomic_read(v);
60293 }
60294
60295+#ifdef CONFIG_PAX_REFCOUNT
60296+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60297+{
60298+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60299+
60300+ return (long)atomic_read_unchecked(v);
60301+}
60302+#endif
60303+
60304 static inline void atomic_long_set(atomic_long_t *l, long i)
60305 {
60306 atomic_t *v = (atomic_t *)l;
60307@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
60308 atomic_set(v, i);
60309 }
60310
60311+#ifdef CONFIG_PAX_REFCOUNT
60312+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60313+{
60314+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60315+
60316+ atomic_set_unchecked(v, i);
60317+}
60318+#endif
60319+
60320 static inline void atomic_long_inc(atomic_long_t *l)
60321 {
60322 atomic_t *v = (atomic_t *)l;
60323@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
60324 atomic_inc(v);
60325 }
60326
60327+#ifdef CONFIG_PAX_REFCOUNT
60328+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60329+{
60330+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60331+
60332+ atomic_inc_unchecked(v);
60333+}
60334+#endif
60335+
60336 static inline void atomic_long_dec(atomic_long_t *l)
60337 {
60338 atomic_t *v = (atomic_t *)l;
60339@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
60340 atomic_dec(v);
60341 }
60342
60343+#ifdef CONFIG_PAX_REFCOUNT
60344+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60345+{
60346+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60347+
60348+ atomic_dec_unchecked(v);
60349+}
60350+#endif
60351+
60352 static inline void atomic_long_add(long i, atomic_long_t *l)
60353 {
60354 atomic_t *v = (atomic_t *)l;
60355@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
60356 atomic_add(i, v);
60357 }
60358
60359+#ifdef CONFIG_PAX_REFCOUNT
60360+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60361+{
60362+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60363+
60364+ atomic_add_unchecked(i, v);
60365+}
60366+#endif
60367+
60368 static inline void atomic_long_sub(long i, atomic_long_t *l)
60369 {
60370 atomic_t *v = (atomic_t *)l;
60371@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
60372 atomic_sub(i, v);
60373 }
60374
60375+#ifdef CONFIG_PAX_REFCOUNT
60376+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
60377+{
60378+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60379+
60380+ atomic_sub_unchecked(i, v);
60381+}
60382+#endif
60383+
60384 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
60385 {
60386 atomic_t *v = (atomic_t *)l;
60387@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
60388 return (long)atomic_inc_return(v);
60389 }
60390
60391+#ifdef CONFIG_PAX_REFCOUNT
60392+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60393+{
60394+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60395+
60396+ return (long)atomic_inc_return_unchecked(v);
60397+}
60398+#endif
60399+
60400 static inline long atomic_long_dec_return(atomic_long_t *l)
60401 {
60402 atomic_t *v = (atomic_t *)l;
60403@@ -255,4 +393,55 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
60404
60405 #endif /* BITS_PER_LONG == 64 */
60406
60407+#ifdef CONFIG_PAX_REFCOUNT
60408+static inline void pax_refcount_needs_these_functions(void)
60409+{
60410+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
60411+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
60412+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
60413+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
60414+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
60415+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
60416+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
60417+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
60418+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
60419+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
60420+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
60421+#ifdef CONFIG_X86
60422+ atomic_clear_mask_unchecked(0, NULL);
60423+ atomic_set_mask_unchecked(0, NULL);
60424+#endif
60425+
60426+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
60427+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
60428+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
60429+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
60430+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
60431+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
60432+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
60433+}
60434+#else
60435+#define atomic_read_unchecked(v) atomic_read(v)
60436+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
60437+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
60438+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
60439+#define atomic_inc_unchecked(v) atomic_inc(v)
60440+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
60441+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
60442+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
60443+#define atomic_dec_unchecked(v) atomic_dec(v)
60444+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
60445+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
60446+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
60447+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
60448+
60449+#define atomic_long_read_unchecked(v) atomic_long_read(v)
60450+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
60451+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
60452+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
60453+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
60454+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
60455+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
60456+#endif
60457+
60458 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
60459diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
60460index 1ced641..c896ee8 100644
60461--- a/include/asm-generic/atomic.h
60462+++ b/include/asm-generic/atomic.h
60463@@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
60464 * Atomically clears the bits set in @mask from @v
60465 */
60466 #ifndef atomic_clear_mask
60467-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
60468+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
60469 {
60470 unsigned long flags;
60471
60472diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
60473index b18ce4f..2ee2843 100644
60474--- a/include/asm-generic/atomic64.h
60475+++ b/include/asm-generic/atomic64.h
60476@@ -16,6 +16,8 @@ typedef struct {
60477 long long counter;
60478 } atomic64_t;
60479
60480+typedef atomic64_t atomic64_unchecked_t;
60481+
60482 #define ATOMIC64_INIT(i) { (i) }
60483
60484 extern long long atomic64_read(const atomic64_t *v);
60485@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
60486 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
60487 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
60488
60489+#define atomic64_read_unchecked(v) atomic64_read(v)
60490+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
60491+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
60492+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
60493+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
60494+#define atomic64_inc_unchecked(v) atomic64_inc(v)
60495+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
60496+#define atomic64_dec_unchecked(v) atomic64_dec(v)
60497+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
60498+
60499 #endif /* _ASM_GENERIC_ATOMIC64_H */
60500diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
60501index 1bfcfe5..e04c5c9 100644
60502--- a/include/asm-generic/cache.h
60503+++ b/include/asm-generic/cache.h
60504@@ -6,7 +6,7 @@
60505 * cache lines need to provide their own cache.h.
60506 */
60507
60508-#define L1_CACHE_SHIFT 5
60509-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
60510+#define L1_CACHE_SHIFT 5UL
60511+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
60512
60513 #endif /* __ASM_GENERIC_CACHE_H */
60514diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
60515index 0d68a1e..b74a761 100644
60516--- a/include/asm-generic/emergency-restart.h
60517+++ b/include/asm-generic/emergency-restart.h
60518@@ -1,7 +1,7 @@
60519 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
60520 #define _ASM_GENERIC_EMERGENCY_RESTART_H
60521
60522-static inline void machine_emergency_restart(void)
60523+static inline __noreturn void machine_emergency_restart(void)
60524 {
60525 machine_restart(NULL);
60526 }
60527diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
60528index 0232ccb..13d9165 100644
60529--- a/include/asm-generic/kmap_types.h
60530+++ b/include/asm-generic/kmap_types.h
60531@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
60532 KMAP_D(17) KM_NMI,
60533 KMAP_D(18) KM_NMI_PTE,
60534 KMAP_D(19) KM_KDB,
60535+KMAP_D(20) KM_CLEARPAGE,
60536 /*
60537 * Remember to update debug_kmap_atomic() when adding new kmap types!
60538 */
60539-KMAP_D(20) KM_TYPE_NR
60540+KMAP_D(21) KM_TYPE_NR
60541 };
60542
60543 #undef KMAP_D
60544diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
60545index 9ceb03b..2efbcbd 100644
60546--- a/include/asm-generic/local.h
60547+++ b/include/asm-generic/local.h
60548@@ -39,6 +39,7 @@ typedef struct
60549 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
60550 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
60551 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
60552+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
60553
60554 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
60555 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
60556diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
60557index 725612b..9cc513a 100644
60558--- a/include/asm-generic/pgtable-nopmd.h
60559+++ b/include/asm-generic/pgtable-nopmd.h
60560@@ -1,14 +1,19 @@
60561 #ifndef _PGTABLE_NOPMD_H
60562 #define _PGTABLE_NOPMD_H
60563
60564-#ifndef __ASSEMBLY__
60565-
60566 #include <asm-generic/pgtable-nopud.h>
60567
60568-struct mm_struct;
60569-
60570 #define __PAGETABLE_PMD_FOLDED
60571
60572+#define PMD_SHIFT PUD_SHIFT
60573+#define PTRS_PER_PMD 1
60574+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
60575+#define PMD_MASK (~(PMD_SIZE-1))
60576+
60577+#ifndef __ASSEMBLY__
60578+
60579+struct mm_struct;
60580+
60581 /*
60582 * Having the pmd type consist of a pud gets the size right, and allows
60583 * us to conceptually access the pud entry that this pmd is folded into
60584@@ -16,11 +21,6 @@ struct mm_struct;
60585 */
60586 typedef struct { pud_t pud; } pmd_t;
60587
60588-#define PMD_SHIFT PUD_SHIFT
60589-#define PTRS_PER_PMD 1
60590-#define PMD_SIZE (1UL << PMD_SHIFT)
60591-#define PMD_MASK (~(PMD_SIZE-1))
60592-
60593 /*
60594 * The "pud_xxx()" functions here are trivial for a folded two-level
60595 * setup: the pmd is never bad, and a pmd always exists (as it's folded
60596diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
60597index 810431d..0ec4804f 100644
60598--- a/include/asm-generic/pgtable-nopud.h
60599+++ b/include/asm-generic/pgtable-nopud.h
60600@@ -1,10 +1,15 @@
60601 #ifndef _PGTABLE_NOPUD_H
60602 #define _PGTABLE_NOPUD_H
60603
60604-#ifndef __ASSEMBLY__
60605-
60606 #define __PAGETABLE_PUD_FOLDED
60607
60608+#define PUD_SHIFT PGDIR_SHIFT
60609+#define PTRS_PER_PUD 1
60610+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
60611+#define PUD_MASK (~(PUD_SIZE-1))
60612+
60613+#ifndef __ASSEMBLY__
60614+
60615 /*
60616 * Having the pud type consist of a pgd gets the size right, and allows
60617 * us to conceptually access the pgd entry that this pud is folded into
60618@@ -12,11 +17,6 @@
60619 */
60620 typedef struct { pgd_t pgd; } pud_t;
60621
60622-#define PUD_SHIFT PGDIR_SHIFT
60623-#define PTRS_PER_PUD 1
60624-#define PUD_SIZE (1UL << PUD_SHIFT)
60625-#define PUD_MASK (~(PUD_SIZE-1))
60626-
60627 /*
60628 * The "pgd_xxx()" functions here are trivial for a folded two-level
60629 * setup: the pud is never bad, and a pud always exists (as it's folded
60630@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
60631 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
60632
60633 #define pgd_populate(mm, pgd, pud) do { } while (0)
60634+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
60635 /*
60636 * (puds are folded into pgds so this doesn't get actually called,
60637 * but the define is needed for a generic inline function.)
60638diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
60639index ff4947b..f48183f 100644
60640--- a/include/asm-generic/pgtable.h
60641+++ b/include/asm-generic/pgtable.h
60642@@ -530,6 +530,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
60643 #endif
60644 }
60645
60646+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
60647+static inline unsigned long pax_open_kernel(void) { return 0; }
60648+#endif
60649+
60650+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
60651+static inline unsigned long pax_close_kernel(void) { return 0; }
60652+#endif
60653+
60654 #endif /* CONFIG_MMU */
60655
60656 #endif /* !__ASSEMBLY__ */
60657diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
60658index 4e2e1cc..12c266b 100644
60659--- a/include/asm-generic/vmlinux.lds.h
60660+++ b/include/asm-generic/vmlinux.lds.h
60661@@ -218,6 +218,7 @@
60662 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
60663 VMLINUX_SYMBOL(__start_rodata) = .; \
60664 *(.rodata) *(.rodata.*) \
60665+ *(.data..read_only) \
60666 *(__vermagic) /* Kernel version magic */ \
60667 . = ALIGN(8); \
60668 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
60669@@ -716,17 +717,18 @@
60670 * section in the linker script will go there too. @phdr should have
60671 * a leading colon.
60672 *
60673- * Note that this macros defines __per_cpu_load as an absolute symbol.
60674+ * Note that this macros defines per_cpu_load as an absolute symbol.
60675 * If there is no need to put the percpu section at a predetermined
60676 * address, use PERCPU_SECTION.
60677 */
60678 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
60679- VMLINUX_SYMBOL(__per_cpu_load) = .; \
60680- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
60681+ per_cpu_load = .; \
60682+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
60683 - LOAD_OFFSET) { \
60684+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
60685 PERCPU_INPUT(cacheline) \
60686 } phdr \
60687- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
60688+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
60689
60690 /**
60691 * PERCPU_SECTION - define output section for percpu area, simple version
60692diff --git a/include/drm/drmP.h b/include/drm/drmP.h
60693index 31ad880..4e79884 100644
60694--- a/include/drm/drmP.h
60695+++ b/include/drm/drmP.h
60696@@ -72,6 +72,7 @@
60697 #include <linux/workqueue.h>
60698 #include <linux/poll.h>
60699 #include <asm/pgalloc.h>
60700+#include <asm/local.h>
60701 #include "drm.h"
60702
60703 #include <linux/idr.h>
60704@@ -1074,7 +1075,7 @@ struct drm_device {
60705
60706 /** \name Usage Counters */
60707 /*@{ */
60708- int open_count; /**< Outstanding files open */
60709+ local_t open_count; /**< Outstanding files open */
60710 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
60711 atomic_t vma_count; /**< Outstanding vma areas open */
60712 int buf_use; /**< Buffers in use -- cannot alloc */
60713@@ -1085,7 +1086,7 @@ struct drm_device {
60714 /*@{ */
60715 unsigned long counters;
60716 enum drm_stat_type types[15];
60717- atomic_t counts[15];
60718+ atomic_unchecked_t counts[15];
60719 /*@} */
60720
60721 struct list_head filelist;
60722diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
60723index 7988e55..ec974c9 100644
60724--- a/include/drm/drm_crtc_helper.h
60725+++ b/include/drm/drm_crtc_helper.h
60726@@ -81,7 +81,7 @@ struct drm_crtc_helper_funcs {
60727
60728 /* disable crtc when not in use - more explicit than dpms off */
60729 void (*disable)(struct drm_crtc *crtc);
60730-};
60731+} __no_const;
60732
60733 /**
60734 * drm_encoder_helper_funcs - helper operations for encoders
60735@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
60736 struct drm_connector *connector);
60737 /* disable encoder when not in use - more explicit than dpms off */
60738 void (*disable)(struct drm_encoder *encoder);
60739-};
60740+} __no_const;
60741
60742 /**
60743 * drm_connector_helper_funcs - helper operations for connectors
60744diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
60745index d6d1da4..fdd1ac5 100644
60746--- a/include/drm/ttm/ttm_memory.h
60747+++ b/include/drm/ttm/ttm_memory.h
60748@@ -48,7 +48,7 @@
60749
60750 struct ttm_mem_shrink {
60751 int (*do_shrink) (struct ttm_mem_shrink *);
60752-};
60753+} __no_const;
60754
60755 /**
60756 * struct ttm_mem_global - Global memory accounting structure.
60757diff --git a/include/linux/a.out.h b/include/linux/a.out.h
60758index e86dfca..40cc55f 100644
60759--- a/include/linux/a.out.h
60760+++ b/include/linux/a.out.h
60761@@ -39,6 +39,14 @@ enum machine_type {
60762 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
60763 };
60764
60765+/* Constants for the N_FLAGS field */
60766+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60767+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
60768+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
60769+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
60770+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60771+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60772+
60773 #if !defined (N_MAGIC)
60774 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
60775 #endif
60776diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
60777index 06fd4bb..1caec0d 100644
60778--- a/include/linux/atmdev.h
60779+++ b/include/linux/atmdev.h
60780@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
60781 #endif
60782
60783 struct k_atm_aal_stats {
60784-#define __HANDLE_ITEM(i) atomic_t i
60785+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60786 __AAL_STAT_ITEMS
60787 #undef __HANDLE_ITEM
60788 };
60789diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
60790index 366422b..1fa7f84 100644
60791--- a/include/linux/binfmts.h
60792+++ b/include/linux/binfmts.h
60793@@ -89,6 +89,7 @@ struct linux_binfmt {
60794 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
60795 int (*load_shlib)(struct file *);
60796 int (*core_dump)(struct coredump_params *cprm);
60797+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
60798 unsigned long min_coredump; /* minimal dump size */
60799 };
60800
60801diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
60802index 07954b0..cb2ae71 100644
60803--- a/include/linux/blkdev.h
60804+++ b/include/linux/blkdev.h
60805@@ -1393,7 +1393,7 @@ struct block_device_operations {
60806 /* this callback is with swap_lock and sometimes page table lock held */
60807 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
60808 struct module *owner;
60809-};
60810+} __do_const;
60811
60812 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
60813 unsigned long);
60814diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
60815index 4d1a074..88f929a 100644
60816--- a/include/linux/blktrace_api.h
60817+++ b/include/linux/blktrace_api.h
60818@@ -162,7 +162,7 @@ struct blk_trace {
60819 struct dentry *dir;
60820 struct dentry *dropped_file;
60821 struct dentry *msg_file;
60822- atomic_t dropped;
60823+ atomic_unchecked_t dropped;
60824 };
60825
60826 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
60827diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
60828index 83195fb..0b0f77d 100644
60829--- a/include/linux/byteorder/little_endian.h
60830+++ b/include/linux/byteorder/little_endian.h
60831@@ -42,51 +42,51 @@
60832
60833 static inline __le64 __cpu_to_le64p(const __u64 *p)
60834 {
60835- return (__force __le64)*p;
60836+ return (__force const __le64)*p;
60837 }
60838 static inline __u64 __le64_to_cpup(const __le64 *p)
60839 {
60840- return (__force __u64)*p;
60841+ return (__force const __u64)*p;
60842 }
60843 static inline __le32 __cpu_to_le32p(const __u32 *p)
60844 {
60845- return (__force __le32)*p;
60846+ return (__force const __le32)*p;
60847 }
60848 static inline __u32 __le32_to_cpup(const __le32 *p)
60849 {
60850- return (__force __u32)*p;
60851+ return (__force const __u32)*p;
60852 }
60853 static inline __le16 __cpu_to_le16p(const __u16 *p)
60854 {
60855- return (__force __le16)*p;
60856+ return (__force const __le16)*p;
60857 }
60858 static inline __u16 __le16_to_cpup(const __le16 *p)
60859 {
60860- return (__force __u16)*p;
60861+ return (__force const __u16)*p;
60862 }
60863 static inline __be64 __cpu_to_be64p(const __u64 *p)
60864 {
60865- return (__force __be64)__swab64p(p);
60866+ return (__force const __be64)__swab64p(p);
60867 }
60868 static inline __u64 __be64_to_cpup(const __be64 *p)
60869 {
60870- return __swab64p((__u64 *)p);
60871+ return __swab64p((const __u64 *)p);
60872 }
60873 static inline __be32 __cpu_to_be32p(const __u32 *p)
60874 {
60875- return (__force __be32)__swab32p(p);
60876+ return (__force const __be32)__swab32p(p);
60877 }
60878 static inline __u32 __be32_to_cpup(const __be32 *p)
60879 {
60880- return __swab32p((__u32 *)p);
60881+ return __swab32p((const __u32 *)p);
60882 }
60883 static inline __be16 __cpu_to_be16p(const __u16 *p)
60884 {
60885- return (__force __be16)__swab16p(p);
60886+ return (__force const __be16)__swab16p(p);
60887 }
60888 static inline __u16 __be16_to_cpup(const __be16 *p)
60889 {
60890- return __swab16p((__u16 *)p);
60891+ return __swab16p((const __u16 *)p);
60892 }
60893 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
60894 #define __le64_to_cpus(x) do { (void)(x); } while (0)
60895diff --git a/include/linux/cache.h b/include/linux/cache.h
60896index 4c57065..4307975 100644
60897--- a/include/linux/cache.h
60898+++ b/include/linux/cache.h
60899@@ -16,6 +16,10 @@
60900 #define __read_mostly
60901 #endif
60902
60903+#ifndef __read_only
60904+#define __read_only __read_mostly
60905+#endif
60906+
60907 #ifndef ____cacheline_aligned
60908 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
60909 #endif
60910diff --git a/include/linux/capability.h b/include/linux/capability.h
60911index d10b7ed..0288b79 100644
60912--- a/include/linux/capability.h
60913+++ b/include/linux/capability.h
60914@@ -553,10 +553,15 @@ extern bool capable(int cap);
60915 extern bool ns_capable(struct user_namespace *ns, int cap);
60916 extern bool nsown_capable(int cap);
60917 extern bool inode_capable(const struct inode *inode, int cap);
60918+extern bool capable_nolog(int cap);
60919+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
60920+extern bool inode_capable_nolog(const struct inode *inode, int cap);
60921
60922 /* audit system wants to get cap info from files as well */
60923 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
60924
60925+extern int is_privileged_binary(const struct dentry *dentry);
60926+
60927 #endif /* __KERNEL__ */
60928
60929 #endif /* !_LINUX_CAPABILITY_H */
60930diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
60931index 42e55de..1cd0e66 100644
60932--- a/include/linux/cleancache.h
60933+++ b/include/linux/cleancache.h
60934@@ -31,7 +31,7 @@ struct cleancache_ops {
60935 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
60936 void (*invalidate_inode)(int, struct cleancache_filekey);
60937 void (*invalidate_fs)(int);
60938-};
60939+} __no_const;
60940
60941 extern struct cleancache_ops
60942 cleancache_register_ops(struct cleancache_ops *ops);
60943diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
60944index 4a0b483..f1f70ba 100644
60945--- a/include/linux/clk-provider.h
60946+++ b/include/linux/clk-provider.h
60947@@ -110,6 +110,7 @@ struct clk_ops {
60948 unsigned long);
60949 void (*init)(struct clk_hw *hw);
60950 };
60951+typedef struct clk_ops __no_const clk_ops_no_const;
60952
60953 /**
60954 * struct clk_init_data - holds init data that's common to all clocks and is
60955diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
60956index 2f40791..938880e 100644
60957--- a/include/linux/compiler-gcc4.h
60958+++ b/include/linux/compiler-gcc4.h
60959@@ -32,6 +32,21 @@
60960 #define __linktime_error(message) __attribute__((__error__(message)))
60961
60962 #if __GNUC_MINOR__ >= 5
60963+
60964+#ifdef CONSTIFY_PLUGIN
60965+#define __no_const __attribute__((no_const))
60966+#define __do_const __attribute__((do_const))
60967+#endif
60968+
60969+#ifdef SIZE_OVERFLOW_PLUGIN
60970+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
60971+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
60972+#endif
60973+
60974+#ifdef LATENT_ENTROPY_PLUGIN
60975+#define __latent_entropy __attribute__((latent_entropy))
60976+#endif
60977+
60978 /*
60979 * Mark a position in code as unreachable. This can be used to
60980 * suppress control flow warnings after asm blocks that transfer
60981@@ -47,6 +62,11 @@
60982 #define __noclone __attribute__((__noclone__))
60983
60984 #endif
60985+
60986+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
60987+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
60988+#define __bos0(ptr) __bos((ptr), 0)
60989+#define __bos1(ptr) __bos((ptr), 1)
60990 #endif
60991
60992 #if __GNUC_MINOR__ > 0
60993diff --git a/include/linux/compiler.h b/include/linux/compiler.h
60994index 923d093..3625de1 100644
60995--- a/include/linux/compiler.h
60996+++ b/include/linux/compiler.h
60997@@ -5,31 +5,62 @@
60998
60999 #ifdef __CHECKER__
61000 # define __user __attribute__((noderef, address_space(1)))
61001+# define __force_user __force __user
61002 # define __kernel __attribute__((address_space(0)))
61003+# define __force_kernel __force __kernel
61004 # define __safe __attribute__((safe))
61005 # define __force __attribute__((force))
61006 # define __nocast __attribute__((nocast))
61007 # define __iomem __attribute__((noderef, address_space(2)))
61008+# define __force_iomem __force __iomem
61009 # define __acquires(x) __attribute__((context(x,0,1)))
61010 # define __releases(x) __attribute__((context(x,1,0)))
61011 # define __acquire(x) __context__(x,1)
61012 # define __release(x) __context__(x,-1)
61013 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
61014 # define __percpu __attribute__((noderef, address_space(3)))
61015+# define __force_percpu __force __percpu
61016 #ifdef CONFIG_SPARSE_RCU_POINTER
61017 # define __rcu __attribute__((noderef, address_space(4)))
61018+# define __force_rcu __force __rcu
61019 #else
61020 # define __rcu
61021+# define __force_rcu
61022 #endif
61023 extern void __chk_user_ptr(const volatile void __user *);
61024 extern void __chk_io_ptr(const volatile void __iomem *);
61025+#elif defined(CHECKER_PLUGIN)
61026+//# define __user
61027+//# define __force_user
61028+//# define __kernel
61029+//# define __force_kernel
61030+# define __safe
61031+# define __force
61032+# define __nocast
61033+# define __iomem
61034+# define __force_iomem
61035+# define __chk_user_ptr(x) (void)0
61036+# define __chk_io_ptr(x) (void)0
61037+# define __builtin_warning(x, y...) (1)
61038+# define __acquires(x)
61039+# define __releases(x)
61040+# define __acquire(x) (void)0
61041+# define __release(x) (void)0
61042+# define __cond_lock(x,c) (c)
61043+# define __percpu
61044+# define __force_percpu
61045+# define __rcu
61046+# define __force_rcu
61047 #else
61048 # define __user
61049+# define __force_user
61050 # define __kernel
61051+# define __force_kernel
61052 # define __safe
61053 # define __force
61054 # define __nocast
61055 # define __iomem
61056+# define __force_iomem
61057 # define __chk_user_ptr(x) (void)0
61058 # define __chk_io_ptr(x) (void)0
61059 # define __builtin_warning(x, y...) (1)
61060@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
61061 # define __release(x) (void)0
61062 # define __cond_lock(x,c) (c)
61063 # define __percpu
61064+# define __force_percpu
61065 # define __rcu
61066+# define __force_rcu
61067 #endif
61068
61069 #ifdef __KERNEL__
61070@@ -264,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61071 # define __attribute_const__ /* unimplemented */
61072 #endif
61073
61074+#ifndef __no_const
61075+# define __no_const
61076+#endif
61077+
61078+#ifndef __do_const
61079+# define __do_const
61080+#endif
61081+
61082+#ifndef __size_overflow
61083+# define __size_overflow(...)
61084+#endif
61085+
61086+#ifndef __latent_entropy
61087+# define __latent_entropy
61088+#endif
61089+
61090+#ifndef __intentional_overflow
61091+# define __intentional_overflow(...)
61092+#endif
61093+
61094 /*
61095 * Tell gcc if a function is cold. The compiler will assume any path
61096 * directly leading to the call is unlikely.
61097@@ -273,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61098 #define __cold
61099 #endif
61100
61101+#ifndef __alloc_size
61102+#define __alloc_size(...)
61103+#endif
61104+
61105+#ifndef __bos
61106+#define __bos(ptr, arg)
61107+#endif
61108+
61109+#ifndef __bos0
61110+#define __bos0(ptr)
61111+#endif
61112+
61113+#ifndef __bos1
61114+#define __bos1(ptr)
61115+#endif
61116+
61117 /* Simple shorthand for a section definition */
61118 #ifndef __section
61119 # define __section(S) __attribute__ ((__section__(#S)))
61120@@ -308,6 +377,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
61121 * use is to mediate communication between process-level code and irq/NMI
61122 * handlers, all running on the same CPU.
61123 */
61124-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
61125+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
61126+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
61127
61128 #endif /* __LINUX_COMPILER_H */
61129diff --git a/include/linux/cred.h b/include/linux/cred.h
61130index ebbed2c..908cc2c 100644
61131--- a/include/linux/cred.h
61132+++ b/include/linux/cred.h
61133@@ -208,6 +208,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
61134 static inline void validate_process_creds(void)
61135 {
61136 }
61137+static inline void validate_task_creds(struct task_struct *task)
61138+{
61139+}
61140 #endif
61141
61142 /**
61143diff --git a/include/linux/crypto.h b/include/linux/crypto.h
61144index b92eadf..b4ecdc1 100644
61145--- a/include/linux/crypto.h
61146+++ b/include/linux/crypto.h
61147@@ -373,7 +373,7 @@ struct cipher_tfm {
61148 const u8 *key, unsigned int keylen);
61149 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
61150 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
61151-};
61152+} __no_const;
61153
61154 struct hash_tfm {
61155 int (*init)(struct hash_desc *desc);
61156@@ -394,13 +394,13 @@ struct compress_tfm {
61157 int (*cot_decompress)(struct crypto_tfm *tfm,
61158 const u8 *src, unsigned int slen,
61159 u8 *dst, unsigned int *dlen);
61160-};
61161+} __no_const;
61162
61163 struct rng_tfm {
61164 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
61165 unsigned int dlen);
61166 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
61167-};
61168+} __no_const;
61169
61170 #define crt_ablkcipher crt_u.ablkcipher
61171 #define crt_aead crt_u.aead
61172diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
61173index 7925bf0..d5143d2 100644
61174--- a/include/linux/decompress/mm.h
61175+++ b/include/linux/decompress/mm.h
61176@@ -77,7 +77,7 @@ static void free(void *where)
61177 * warnings when not needed (indeed large_malloc / large_free are not
61178 * needed by inflate */
61179
61180-#define malloc(a) kmalloc(a, GFP_KERNEL)
61181+#define malloc(a) kmalloc((a), GFP_KERNEL)
61182 #define free(a) kfree(a)
61183
61184 #define large_malloc(a) vmalloc(a)
61185diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
61186index dfc099e..e583e66 100644
61187--- a/include/linux/dma-mapping.h
61188+++ b/include/linux/dma-mapping.h
61189@@ -51,7 +51,7 @@ struct dma_map_ops {
61190 u64 (*get_required_mask)(struct device *dev);
61191 #endif
61192 int is_phys;
61193-};
61194+} __do_const;
61195
61196 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
61197
61198diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
61199index 56377df..4eb4990 100644
61200--- a/include/linux/dmaengine.h
61201+++ b/include/linux/dmaengine.h
61202@@ -1007,9 +1007,9 @@ struct dma_pinned_list {
61203 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
61204 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
61205
61206-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
61207+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
61208 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
61209-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
61210+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
61211 struct dma_pinned_list *pinned_list, struct page *page,
61212 unsigned int offset, size_t len);
61213
61214diff --git a/include/linux/efi.h b/include/linux/efi.h
61215index ec45ccd..9923c32 100644
61216--- a/include/linux/efi.h
61217+++ b/include/linux/efi.h
61218@@ -635,7 +635,7 @@ struct efivar_operations {
61219 efi_get_variable_t *get_variable;
61220 efi_get_next_variable_t *get_next_variable;
61221 efi_set_variable_t *set_variable;
61222-};
61223+} __no_const;
61224
61225 struct efivars {
61226 /*
61227diff --git a/include/linux/elf.h b/include/linux/elf.h
61228index 999b4f5..57753b4 100644
61229--- a/include/linux/elf.h
61230+++ b/include/linux/elf.h
61231@@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
61232 #define PT_GNU_EH_FRAME 0x6474e550
61233
61234 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
61235+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
61236+
61237+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
61238+
61239+/* Constants for the e_flags field */
61240+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
61241+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
61242+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
61243+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
61244+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
61245+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
61246
61247 /*
61248 * Extended Numbering
61249@@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
61250 #define DT_DEBUG 21
61251 #define DT_TEXTREL 22
61252 #define DT_JMPREL 23
61253+#define DT_FLAGS 30
61254+ #define DF_TEXTREL 0x00000004
61255 #define DT_ENCODING 32
61256 #define OLD_DT_LOOS 0x60000000
61257 #define DT_LOOS 0x6000000d
61258@@ -243,6 +256,19 @@ typedef struct elf64_hdr {
61259 #define PF_W 0x2
61260 #define PF_X 0x1
61261
61262+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
61263+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
61264+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
61265+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
61266+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
61267+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
61268+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
61269+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
61270+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
61271+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
61272+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
61273+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
61274+
61275 typedef struct elf32_phdr{
61276 Elf32_Word p_type;
61277 Elf32_Off p_offset;
61278@@ -335,6 +361,8 @@ typedef struct elf64_shdr {
61279 #define EI_OSABI 7
61280 #define EI_PAD 8
61281
61282+#define EI_PAX 14
61283+
61284 #define ELFMAG0 0x7f /* EI_MAG */
61285 #define ELFMAG1 'E'
61286 #define ELFMAG2 'L'
61287@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
61288 #define elf_note elf32_note
61289 #define elf_addr_t Elf32_Off
61290 #define Elf_Half Elf32_Half
61291+#define elf_dyn Elf32_Dyn
61292
61293 #else
61294
61295@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
61296 #define elf_note elf64_note
61297 #define elf_addr_t Elf64_Off
61298 #define Elf_Half Elf64_Half
61299+#define elf_dyn Elf64_Dyn
61300
61301 #endif
61302
61303diff --git a/include/linux/filter.h b/include/linux/filter.h
61304index 82b0135..917914d 100644
61305--- a/include/linux/filter.h
61306+++ b/include/linux/filter.h
61307@@ -146,6 +146,7 @@ struct compat_sock_fprog {
61308
61309 struct sk_buff;
61310 struct sock;
61311+struct bpf_jit_work;
61312
61313 struct sk_filter
61314 {
61315@@ -153,6 +154,9 @@ struct sk_filter
61316 unsigned int len; /* Number of filter blocks */
61317 unsigned int (*bpf_func)(const struct sk_buff *skb,
61318 const struct sock_filter *filter);
61319+#ifdef CONFIG_BPF_JIT
61320+ struct bpf_jit_work *work;
61321+#endif
61322 struct rcu_head rcu;
61323 struct sock_filter insns[0];
61324 };
61325diff --git a/include/linux/firewire.h b/include/linux/firewire.h
61326index 7edcf10..714d5e8 100644
61327--- a/include/linux/firewire.h
61328+++ b/include/linux/firewire.h
61329@@ -430,7 +430,7 @@ struct fw_iso_context {
61330 union {
61331 fw_iso_callback_t sc;
61332 fw_iso_mc_callback_t mc;
61333- } callback;
61334+ } __no_const callback;
61335 void *callback_data;
61336 };
61337
61338diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
61339index 0e4e2ee..4ff4312 100644
61340--- a/include/linux/frontswap.h
61341+++ b/include/linux/frontswap.h
61342@@ -11,7 +11,7 @@ struct frontswap_ops {
61343 int (*load)(unsigned, pgoff_t, struct page *);
61344 void (*invalidate_page)(unsigned, pgoff_t);
61345 void (*invalidate_area)(unsigned);
61346-};
61347+} __no_const;
61348
61349 extern bool frontswap_enabled;
61350 extern struct frontswap_ops
61351diff --git a/include/linux/fs.h b/include/linux/fs.h
61352index 17fd887..8eebca0 100644
61353--- a/include/linux/fs.h
61354+++ b/include/linux/fs.h
61355@@ -1663,7 +1663,8 @@ struct file_operations {
61356 int (*setlease)(struct file *, long, struct file_lock **);
61357 long (*fallocate)(struct file *file, int mode, loff_t offset,
61358 loff_t len);
61359-};
61360+} __do_const;
61361+typedef struct file_operations __no_const file_operations_no_const;
61362
61363 struct inode_operations {
61364 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
61365diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
61366index 003dc0f..3c4ea97 100644
61367--- a/include/linux/fs_struct.h
61368+++ b/include/linux/fs_struct.h
61369@@ -6,7 +6,7 @@
61370 #include <linux/seqlock.h>
61371
61372 struct fs_struct {
61373- int users;
61374+ atomic_t users;
61375 spinlock_t lock;
61376 seqcount_t seq;
61377 int umask;
61378diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
61379index ce31408..b1ad003 100644
61380--- a/include/linux/fscache-cache.h
61381+++ b/include/linux/fscache-cache.h
61382@@ -102,7 +102,7 @@ struct fscache_operation {
61383 fscache_operation_release_t release;
61384 };
61385
61386-extern atomic_t fscache_op_debug_id;
61387+extern atomic_unchecked_t fscache_op_debug_id;
61388 extern void fscache_op_work_func(struct work_struct *work);
61389
61390 extern void fscache_enqueue_operation(struct fscache_operation *);
61391@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
61392 {
61393 INIT_WORK(&op->work, fscache_op_work_func);
61394 atomic_set(&op->usage, 1);
61395- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
61396+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
61397 op->processor = processor;
61398 op->release = release;
61399 INIT_LIST_HEAD(&op->pend_link);
61400diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
61401index a6dfe69..569586df 100644
61402--- a/include/linux/fsnotify.h
61403+++ b/include/linux/fsnotify.h
61404@@ -315,7 +315,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
61405 */
61406 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
61407 {
61408- return kstrdup(name, GFP_KERNEL);
61409+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
61410 }
61411
61412 /*
61413diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
61414index 63d966d..cdcb717 100644
61415--- a/include/linux/fsnotify_backend.h
61416+++ b/include/linux/fsnotify_backend.h
61417@@ -105,6 +105,7 @@ struct fsnotify_ops {
61418 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
61419 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
61420 };
61421+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
61422
61423 /*
61424 * A group is a "thing" that wants to receive notification about filesystem
61425diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
61426index 176a939..1462211 100644
61427--- a/include/linux/ftrace_event.h
61428+++ b/include/linux/ftrace_event.h
61429@@ -97,7 +97,7 @@ struct trace_event_functions {
61430 trace_print_func raw;
61431 trace_print_func hex;
61432 trace_print_func binary;
61433-};
61434+} __no_const;
61435
61436 struct trace_event {
61437 struct hlist_node node;
61438@@ -263,7 +263,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
61439 extern int trace_add_event_call(struct ftrace_event_call *call);
61440 extern void trace_remove_event_call(struct ftrace_event_call *call);
61441
61442-#define is_signed_type(type) (((type)(-1)) < 0)
61443+#define is_signed_type(type) (((type)(-1)) < (type)1)
61444
61445 int trace_set_clr_event(const char *system, const char *event, int set);
61446
61447diff --git a/include/linux/genhd.h b/include/linux/genhd.h
61448index 017a7fb..33a8507 100644
61449--- a/include/linux/genhd.h
61450+++ b/include/linux/genhd.h
61451@@ -185,7 +185,7 @@ struct gendisk {
61452 struct kobject *slave_dir;
61453
61454 struct timer_rand_state *random;
61455- atomic_t sync_io; /* RAID */
61456+ atomic_unchecked_t sync_io; /* RAID */
61457 struct disk_events *ev;
61458 #ifdef CONFIG_BLK_DEV_INTEGRITY
61459 struct blk_integrity *integrity;
61460diff --git a/include/linux/gfp.h b/include/linux/gfp.h
61461index 1e49be4..b8a9305 100644
61462--- a/include/linux/gfp.h
61463+++ b/include/linux/gfp.h
61464@@ -38,6 +38,12 @@ struct vm_area_struct;
61465 #define ___GFP_OTHER_NODE 0x800000u
61466 #define ___GFP_WRITE 0x1000000u
61467
61468+#ifdef CONFIG_PAX_USERCOPY_SLABS
61469+#define ___GFP_USERCOPY 0x2000000u
61470+#else
61471+#define ___GFP_USERCOPY 0
61472+#endif
61473+
61474 /*
61475 * GFP bitmasks..
61476 *
61477@@ -87,6 +93,7 @@ struct vm_area_struct;
61478 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
61479 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
61480 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
61481+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
61482
61483 /*
61484 * This may seem redundant, but it's a way of annotating false positives vs.
61485@@ -94,7 +101,7 @@ struct vm_area_struct;
61486 */
61487 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
61488
61489-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
61490+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
61491 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
61492
61493 /* This equals 0, but use constants in case they ever change */
61494@@ -148,6 +155,8 @@ struct vm_area_struct;
61495 /* 4GB DMA on some platforms */
61496 #define GFP_DMA32 __GFP_DMA32
61497
61498+#define GFP_USERCOPY __GFP_USERCOPY
61499+
61500 /* Convert GFP flags to their corresponding migrate type */
61501 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
61502 {
61503diff --git a/include/linux/gracl.h b/include/linux/gracl.h
61504new file mode 100644
61505index 0000000..c938b1f
61506--- /dev/null
61507+++ b/include/linux/gracl.h
61508@@ -0,0 +1,319 @@
61509+#ifndef GR_ACL_H
61510+#define GR_ACL_H
61511+
61512+#include <linux/grdefs.h>
61513+#include <linux/resource.h>
61514+#include <linux/capability.h>
61515+#include <linux/dcache.h>
61516+#include <asm/resource.h>
61517+
61518+/* Major status information */
61519+
61520+#define GR_VERSION "grsecurity 2.9.1"
61521+#define GRSECURITY_VERSION 0x2901
61522+
61523+enum {
61524+ GR_SHUTDOWN = 0,
61525+ GR_ENABLE = 1,
61526+ GR_SPROLE = 2,
61527+ GR_RELOAD = 3,
61528+ GR_SEGVMOD = 4,
61529+ GR_STATUS = 5,
61530+ GR_UNSPROLE = 6,
61531+ GR_PASSSET = 7,
61532+ GR_SPROLEPAM = 8,
61533+};
61534+
61535+/* Password setup definitions
61536+ * kernel/grhash.c */
61537+enum {
61538+ GR_PW_LEN = 128,
61539+ GR_SALT_LEN = 16,
61540+ GR_SHA_LEN = 32,
61541+};
61542+
61543+enum {
61544+ GR_SPROLE_LEN = 64,
61545+};
61546+
61547+enum {
61548+ GR_NO_GLOB = 0,
61549+ GR_REG_GLOB,
61550+ GR_CREATE_GLOB
61551+};
61552+
61553+#define GR_NLIMITS 32
61554+
61555+/* Begin Data Structures */
61556+
61557+struct sprole_pw {
61558+ unsigned char *rolename;
61559+ unsigned char salt[GR_SALT_LEN];
61560+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
61561+};
61562+
61563+struct name_entry {
61564+ __u32 key;
61565+ ino_t inode;
61566+ dev_t device;
61567+ char *name;
61568+ __u16 len;
61569+ __u8 deleted;
61570+ struct name_entry *prev;
61571+ struct name_entry *next;
61572+};
61573+
61574+struct inodev_entry {
61575+ struct name_entry *nentry;
61576+ struct inodev_entry *prev;
61577+ struct inodev_entry *next;
61578+};
61579+
61580+struct acl_role_db {
61581+ struct acl_role_label **r_hash;
61582+ __u32 r_size;
61583+};
61584+
61585+struct inodev_db {
61586+ struct inodev_entry **i_hash;
61587+ __u32 i_size;
61588+};
61589+
61590+struct name_db {
61591+ struct name_entry **n_hash;
61592+ __u32 n_size;
61593+};
61594+
61595+struct crash_uid {
61596+ uid_t uid;
61597+ unsigned long expires;
61598+};
61599+
61600+struct gr_hash_struct {
61601+ void **table;
61602+ void **nametable;
61603+ void *first;
61604+ __u32 table_size;
61605+ __u32 used_size;
61606+ int type;
61607+};
61608+
61609+/* Userspace Grsecurity ACL data structures */
61610+
61611+struct acl_subject_label {
61612+ char *filename;
61613+ ino_t inode;
61614+ dev_t device;
61615+ __u32 mode;
61616+ kernel_cap_t cap_mask;
61617+ kernel_cap_t cap_lower;
61618+ kernel_cap_t cap_invert_audit;
61619+
61620+ struct rlimit res[GR_NLIMITS];
61621+ __u32 resmask;
61622+
61623+ __u8 user_trans_type;
61624+ __u8 group_trans_type;
61625+ uid_t *user_transitions;
61626+ gid_t *group_transitions;
61627+ __u16 user_trans_num;
61628+ __u16 group_trans_num;
61629+
61630+ __u32 sock_families[2];
61631+ __u32 ip_proto[8];
61632+ __u32 ip_type;
61633+ struct acl_ip_label **ips;
61634+ __u32 ip_num;
61635+ __u32 inaddr_any_override;
61636+
61637+ __u32 crashes;
61638+ unsigned long expires;
61639+
61640+ struct acl_subject_label *parent_subject;
61641+ struct gr_hash_struct *hash;
61642+ struct acl_subject_label *prev;
61643+ struct acl_subject_label *next;
61644+
61645+ struct acl_object_label **obj_hash;
61646+ __u32 obj_hash_size;
61647+ __u16 pax_flags;
61648+};
61649+
61650+struct role_allowed_ip {
61651+ __u32 addr;
61652+ __u32 netmask;
61653+
61654+ struct role_allowed_ip *prev;
61655+ struct role_allowed_ip *next;
61656+};
61657+
61658+struct role_transition {
61659+ char *rolename;
61660+
61661+ struct role_transition *prev;
61662+ struct role_transition *next;
61663+};
61664+
61665+struct acl_role_label {
61666+ char *rolename;
61667+ uid_t uidgid;
61668+ __u16 roletype;
61669+
61670+ __u16 auth_attempts;
61671+ unsigned long expires;
61672+
61673+ struct acl_subject_label *root_label;
61674+ struct gr_hash_struct *hash;
61675+
61676+ struct acl_role_label *prev;
61677+ struct acl_role_label *next;
61678+
61679+ struct role_transition *transitions;
61680+ struct role_allowed_ip *allowed_ips;
61681+ uid_t *domain_children;
61682+ __u16 domain_child_num;
61683+
61684+ umode_t umask;
61685+
61686+ struct acl_subject_label **subj_hash;
61687+ __u32 subj_hash_size;
61688+};
61689+
61690+struct user_acl_role_db {
61691+ struct acl_role_label **r_table;
61692+ __u32 num_pointers; /* Number of allocations to track */
61693+ __u32 num_roles; /* Number of roles */
61694+ __u32 num_domain_children; /* Number of domain children */
61695+ __u32 num_subjects; /* Number of subjects */
61696+ __u32 num_objects; /* Number of objects */
61697+};
61698+
61699+struct acl_object_label {
61700+ char *filename;
61701+ ino_t inode;
61702+ dev_t device;
61703+ __u32 mode;
61704+
61705+ struct acl_subject_label *nested;
61706+ struct acl_object_label *globbed;
61707+
61708+ /* next two structures not used */
61709+
61710+ struct acl_object_label *prev;
61711+ struct acl_object_label *next;
61712+};
61713+
61714+struct acl_ip_label {
61715+ char *iface;
61716+ __u32 addr;
61717+ __u32 netmask;
61718+ __u16 low, high;
61719+ __u8 mode;
61720+ __u32 type;
61721+ __u32 proto[8];
61722+
61723+ /* next two structures not used */
61724+
61725+ struct acl_ip_label *prev;
61726+ struct acl_ip_label *next;
61727+};
61728+
61729+struct gr_arg {
61730+ struct user_acl_role_db role_db;
61731+ unsigned char pw[GR_PW_LEN];
61732+ unsigned char salt[GR_SALT_LEN];
61733+ unsigned char sum[GR_SHA_LEN];
61734+ unsigned char sp_role[GR_SPROLE_LEN];
61735+ struct sprole_pw *sprole_pws;
61736+ dev_t segv_device;
61737+ ino_t segv_inode;
61738+ uid_t segv_uid;
61739+ __u16 num_sprole_pws;
61740+ __u16 mode;
61741+};
61742+
61743+struct gr_arg_wrapper {
61744+ struct gr_arg *arg;
61745+ __u32 version;
61746+ __u32 size;
61747+};
61748+
61749+struct subject_map {
61750+ struct acl_subject_label *user;
61751+ struct acl_subject_label *kernel;
61752+ struct subject_map *prev;
61753+ struct subject_map *next;
61754+};
61755+
61756+struct acl_subj_map_db {
61757+ struct subject_map **s_hash;
61758+ __u32 s_size;
61759+};
61760+
61761+/* End Data Structures Section */
61762+
61763+/* Hash functions generated by empirical testing by Brad Spengler
61764+ Makes good use of the low bits of the inode. Generally 0-1 times
61765+ in loop for successful match. 0-3 for unsuccessful match.
61766+ Shift/add algorithm with modulus of table size and an XOR*/
61767+
61768+static __inline__ unsigned int
61769+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
61770+{
61771+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
61772+}
61773+
61774+ static __inline__ unsigned int
61775+shash(const struct acl_subject_label *userp, const unsigned int sz)
61776+{
61777+ return ((const unsigned long)userp % sz);
61778+}
61779+
61780+static __inline__ unsigned int
61781+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
61782+{
61783+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
61784+}
61785+
61786+static __inline__ unsigned int
61787+nhash(const char *name, const __u16 len, const unsigned int sz)
61788+{
61789+ return full_name_hash((const unsigned char *)name, len) % sz;
61790+}
61791+
61792+#define FOR_EACH_ROLE_START(role) \
61793+ role = role_list; \
61794+ while (role) {
61795+
61796+#define FOR_EACH_ROLE_END(role) \
61797+ role = role->prev; \
61798+ }
61799+
61800+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
61801+ subj = NULL; \
61802+ iter = 0; \
61803+ while (iter < role->subj_hash_size) { \
61804+ if (subj == NULL) \
61805+ subj = role->subj_hash[iter]; \
61806+ if (subj == NULL) { \
61807+ iter++; \
61808+ continue; \
61809+ }
61810+
61811+#define FOR_EACH_SUBJECT_END(subj,iter) \
61812+ subj = subj->next; \
61813+ if (subj == NULL) \
61814+ iter++; \
61815+ }
61816+
61817+
61818+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
61819+ subj = role->hash->first; \
61820+ while (subj != NULL) {
61821+
61822+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
61823+ subj = subj->next; \
61824+ }
61825+
61826+#endif
61827+
61828diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
61829new file mode 100644
61830index 0000000..323ecf2
61831--- /dev/null
61832+++ b/include/linux/gralloc.h
61833@@ -0,0 +1,9 @@
61834+#ifndef __GRALLOC_H
61835+#define __GRALLOC_H
61836+
61837+void acl_free_all(void);
61838+int acl_alloc_stack_init(unsigned long size);
61839+void *acl_alloc(unsigned long len);
61840+void *acl_alloc_num(unsigned long num, unsigned long len);
61841+
61842+#endif
61843diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
61844new file mode 100644
61845index 0000000..b30e9bc
61846--- /dev/null
61847+++ b/include/linux/grdefs.h
61848@@ -0,0 +1,140 @@
61849+#ifndef GRDEFS_H
61850+#define GRDEFS_H
61851+
61852+/* Begin grsecurity status declarations */
61853+
61854+enum {
61855+ GR_READY = 0x01,
61856+ GR_STATUS_INIT = 0x00 // disabled state
61857+};
61858+
61859+/* Begin ACL declarations */
61860+
61861+/* Role flags */
61862+
61863+enum {
61864+ GR_ROLE_USER = 0x0001,
61865+ GR_ROLE_GROUP = 0x0002,
61866+ GR_ROLE_DEFAULT = 0x0004,
61867+ GR_ROLE_SPECIAL = 0x0008,
61868+ GR_ROLE_AUTH = 0x0010,
61869+ GR_ROLE_NOPW = 0x0020,
61870+ GR_ROLE_GOD = 0x0040,
61871+ GR_ROLE_LEARN = 0x0080,
61872+ GR_ROLE_TPE = 0x0100,
61873+ GR_ROLE_DOMAIN = 0x0200,
61874+ GR_ROLE_PAM = 0x0400,
61875+ GR_ROLE_PERSIST = 0x0800
61876+};
61877+
61878+/* ACL Subject and Object mode flags */
61879+enum {
61880+ GR_DELETED = 0x80000000
61881+};
61882+
61883+/* ACL Object-only mode flags */
61884+enum {
61885+ GR_READ = 0x00000001,
61886+ GR_APPEND = 0x00000002,
61887+ GR_WRITE = 0x00000004,
61888+ GR_EXEC = 0x00000008,
61889+ GR_FIND = 0x00000010,
61890+ GR_INHERIT = 0x00000020,
61891+ GR_SETID = 0x00000040,
61892+ GR_CREATE = 0x00000080,
61893+ GR_DELETE = 0x00000100,
61894+ GR_LINK = 0x00000200,
61895+ GR_AUDIT_READ = 0x00000400,
61896+ GR_AUDIT_APPEND = 0x00000800,
61897+ GR_AUDIT_WRITE = 0x00001000,
61898+ GR_AUDIT_EXEC = 0x00002000,
61899+ GR_AUDIT_FIND = 0x00004000,
61900+ GR_AUDIT_INHERIT= 0x00008000,
61901+ GR_AUDIT_SETID = 0x00010000,
61902+ GR_AUDIT_CREATE = 0x00020000,
61903+ GR_AUDIT_DELETE = 0x00040000,
61904+ GR_AUDIT_LINK = 0x00080000,
61905+ GR_PTRACERD = 0x00100000,
61906+ GR_NOPTRACE = 0x00200000,
61907+ GR_SUPPRESS = 0x00400000,
61908+ GR_NOLEARN = 0x00800000,
61909+ GR_INIT_TRANSFER= 0x01000000
61910+};
61911+
61912+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
61913+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
61914+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
61915+
61916+/* ACL subject-only mode flags */
61917+enum {
61918+ GR_KILL = 0x00000001,
61919+ GR_VIEW = 0x00000002,
61920+ GR_PROTECTED = 0x00000004,
61921+ GR_LEARN = 0x00000008,
61922+ GR_OVERRIDE = 0x00000010,
61923+ /* just a placeholder, this mode is only used in userspace */
61924+ GR_DUMMY = 0x00000020,
61925+ GR_PROTSHM = 0x00000040,
61926+ GR_KILLPROC = 0x00000080,
61927+ GR_KILLIPPROC = 0x00000100,
61928+ /* just a placeholder, this mode is only used in userspace */
61929+ GR_NOTROJAN = 0x00000200,
61930+ GR_PROTPROCFD = 0x00000400,
61931+ GR_PROCACCT = 0x00000800,
61932+ GR_RELAXPTRACE = 0x00001000,
61933+ GR_NESTED = 0x00002000,
61934+ GR_INHERITLEARN = 0x00004000,
61935+ GR_PROCFIND = 0x00008000,
61936+ GR_POVERRIDE = 0x00010000,
61937+ GR_KERNELAUTH = 0x00020000,
61938+ GR_ATSECURE = 0x00040000,
61939+ GR_SHMEXEC = 0x00080000
61940+};
61941+
61942+enum {
61943+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
61944+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
61945+ GR_PAX_ENABLE_MPROTECT = 0x0004,
61946+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
61947+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
61948+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
61949+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
61950+ GR_PAX_DISABLE_MPROTECT = 0x0400,
61951+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
61952+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
61953+};
61954+
61955+enum {
61956+ GR_ID_USER = 0x01,
61957+ GR_ID_GROUP = 0x02,
61958+};
61959+
61960+enum {
61961+ GR_ID_ALLOW = 0x01,
61962+ GR_ID_DENY = 0x02,
61963+};
61964+
61965+#define GR_CRASH_RES 31
61966+#define GR_UIDTABLE_MAX 500
61967+
61968+/* begin resource learning section */
61969+enum {
61970+ GR_RLIM_CPU_BUMP = 60,
61971+ GR_RLIM_FSIZE_BUMP = 50000,
61972+ GR_RLIM_DATA_BUMP = 10000,
61973+ GR_RLIM_STACK_BUMP = 1000,
61974+ GR_RLIM_CORE_BUMP = 10000,
61975+ GR_RLIM_RSS_BUMP = 500000,
61976+ GR_RLIM_NPROC_BUMP = 1,
61977+ GR_RLIM_NOFILE_BUMP = 5,
61978+ GR_RLIM_MEMLOCK_BUMP = 50000,
61979+ GR_RLIM_AS_BUMP = 500000,
61980+ GR_RLIM_LOCKS_BUMP = 2,
61981+ GR_RLIM_SIGPENDING_BUMP = 5,
61982+ GR_RLIM_MSGQUEUE_BUMP = 10000,
61983+ GR_RLIM_NICE_BUMP = 1,
61984+ GR_RLIM_RTPRIO_BUMP = 1,
61985+ GR_RLIM_RTTIME_BUMP = 1000000
61986+};
61987+
61988+#endif
61989diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
61990new file mode 100644
61991index 0000000..c9292f7
61992--- /dev/null
61993+++ b/include/linux/grinternal.h
61994@@ -0,0 +1,223 @@
61995+#ifndef __GRINTERNAL_H
61996+#define __GRINTERNAL_H
61997+
61998+#ifdef CONFIG_GRKERNSEC
61999+
62000+#include <linux/fs.h>
62001+#include <linux/mnt_namespace.h>
62002+#include <linux/nsproxy.h>
62003+#include <linux/gracl.h>
62004+#include <linux/grdefs.h>
62005+#include <linux/grmsg.h>
62006+
62007+void gr_add_learn_entry(const char *fmt, ...)
62008+ __attribute__ ((format (printf, 1, 2)));
62009+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
62010+ const struct vfsmount *mnt);
62011+__u32 gr_check_create(const struct dentry *new_dentry,
62012+ const struct dentry *parent,
62013+ const struct vfsmount *mnt, const __u32 mode);
62014+int gr_check_protected_task(const struct task_struct *task);
62015+__u32 to_gr_audit(const __u32 reqmode);
62016+int gr_set_acls(const int type);
62017+int gr_apply_subject_to_task(struct task_struct *task);
62018+int gr_acl_is_enabled(void);
62019+char gr_roletype_to_char(void);
62020+
62021+void gr_handle_alertkill(struct task_struct *task);
62022+char *gr_to_filename(const struct dentry *dentry,
62023+ const struct vfsmount *mnt);
62024+char *gr_to_filename1(const struct dentry *dentry,
62025+ const struct vfsmount *mnt);
62026+char *gr_to_filename2(const struct dentry *dentry,
62027+ const struct vfsmount *mnt);
62028+char *gr_to_filename3(const struct dentry *dentry,
62029+ const struct vfsmount *mnt);
62030+
62031+extern int grsec_enable_ptrace_readexec;
62032+extern int grsec_enable_harden_ptrace;
62033+extern int grsec_enable_link;
62034+extern int grsec_enable_fifo;
62035+extern int grsec_enable_execve;
62036+extern int grsec_enable_shm;
62037+extern int grsec_enable_execlog;
62038+extern int grsec_enable_signal;
62039+extern int grsec_enable_audit_ptrace;
62040+extern int grsec_enable_forkfail;
62041+extern int grsec_enable_time;
62042+extern int grsec_enable_rofs;
62043+extern int grsec_enable_chroot_shmat;
62044+extern int grsec_enable_chroot_mount;
62045+extern int grsec_enable_chroot_double;
62046+extern int grsec_enable_chroot_pivot;
62047+extern int grsec_enable_chroot_chdir;
62048+extern int grsec_enable_chroot_chmod;
62049+extern int grsec_enable_chroot_mknod;
62050+extern int grsec_enable_chroot_fchdir;
62051+extern int grsec_enable_chroot_nice;
62052+extern int grsec_enable_chroot_execlog;
62053+extern int grsec_enable_chroot_caps;
62054+extern int grsec_enable_chroot_sysctl;
62055+extern int grsec_enable_chroot_unix;
62056+extern int grsec_enable_symlinkown;
62057+extern int grsec_symlinkown_gid;
62058+extern int grsec_enable_tpe;
62059+extern int grsec_tpe_gid;
62060+extern int grsec_enable_tpe_all;
62061+extern int grsec_enable_tpe_invert;
62062+extern int grsec_enable_socket_all;
62063+extern int grsec_socket_all_gid;
62064+extern int grsec_enable_socket_client;
62065+extern int grsec_socket_client_gid;
62066+extern int grsec_enable_socket_server;
62067+extern int grsec_socket_server_gid;
62068+extern int grsec_audit_gid;
62069+extern int grsec_enable_group;
62070+extern int grsec_enable_audit_textrel;
62071+extern int grsec_enable_log_rwxmaps;
62072+extern int grsec_enable_mount;
62073+extern int grsec_enable_chdir;
62074+extern int grsec_resource_logging;
62075+extern int grsec_enable_blackhole;
62076+extern int grsec_lastack_retries;
62077+extern int grsec_enable_brute;
62078+extern int grsec_lock;
62079+
62080+extern spinlock_t grsec_alert_lock;
62081+extern unsigned long grsec_alert_wtime;
62082+extern unsigned long grsec_alert_fyet;
62083+
62084+extern spinlock_t grsec_audit_lock;
62085+
62086+extern rwlock_t grsec_exec_file_lock;
62087+
62088+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
62089+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
62090+ (tsk)->exec_file->f_vfsmnt) : "/")
62091+
62092+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
62093+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
62094+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
62095+
62096+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
62097+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
62098+ (tsk)->exec_file->f_vfsmnt) : "/")
62099+
62100+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
62101+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
62102+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
62103+
62104+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
62105+
62106+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
62107+
62108+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
62109+ (task)->pid, (cred)->uid, \
62110+ (cred)->euid, (cred)->gid, (cred)->egid, \
62111+ gr_parent_task_fullpath(task), \
62112+ (task)->real_parent->comm, (task)->real_parent->pid, \
62113+ (pcred)->uid, (pcred)->euid, \
62114+ (pcred)->gid, (pcred)->egid
62115+
62116+#define GR_CHROOT_CAPS {{ \
62117+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
62118+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
62119+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
62120+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
62121+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
62122+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
62123+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
62124+
62125+#define security_learn(normal_msg,args...) \
62126+({ \
62127+ read_lock(&grsec_exec_file_lock); \
62128+ gr_add_learn_entry(normal_msg "\n", ## args); \
62129+ read_unlock(&grsec_exec_file_lock); \
62130+})
62131+
62132+enum {
62133+ GR_DO_AUDIT,
62134+ GR_DONT_AUDIT,
62135+ /* used for non-audit messages that we shouldn't kill the task on */
62136+ GR_DONT_AUDIT_GOOD
62137+};
62138+
62139+enum {
62140+ GR_TTYSNIFF,
62141+ GR_RBAC,
62142+ GR_RBAC_STR,
62143+ GR_STR_RBAC,
62144+ GR_RBAC_MODE2,
62145+ GR_RBAC_MODE3,
62146+ GR_FILENAME,
62147+ GR_SYSCTL_HIDDEN,
62148+ GR_NOARGS,
62149+ GR_ONE_INT,
62150+ GR_ONE_INT_TWO_STR,
62151+ GR_ONE_STR,
62152+ GR_STR_INT,
62153+ GR_TWO_STR_INT,
62154+ GR_TWO_INT,
62155+ GR_TWO_U64,
62156+ GR_THREE_INT,
62157+ GR_FIVE_INT_TWO_STR,
62158+ GR_TWO_STR,
62159+ GR_THREE_STR,
62160+ GR_FOUR_STR,
62161+ GR_STR_FILENAME,
62162+ GR_FILENAME_STR,
62163+ GR_FILENAME_TWO_INT,
62164+ GR_FILENAME_TWO_INT_STR,
62165+ GR_TEXTREL,
62166+ GR_PTRACE,
62167+ GR_RESOURCE,
62168+ GR_CAP,
62169+ GR_SIG,
62170+ GR_SIG2,
62171+ GR_CRASH1,
62172+ GR_CRASH2,
62173+ GR_PSACCT,
62174+ GR_RWXMAP
62175+};
62176+
62177+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
62178+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
62179+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
62180+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
62181+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
62182+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
62183+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
62184+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
62185+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
62186+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
62187+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
62188+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
62189+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
62190+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
62191+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
62192+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
62193+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
62194+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
62195+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
62196+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
62197+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
62198+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
62199+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
62200+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
62201+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
62202+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
62203+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
62204+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
62205+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
62206+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
62207+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
62208+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
62209+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
62210+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
62211+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
62212+
62213+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
62214+
62215+#endif
62216+
62217+#endif
62218diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
62219new file mode 100644
62220index 0000000..54f4e85
62221--- /dev/null
62222+++ b/include/linux/grmsg.h
62223@@ -0,0 +1,110 @@
62224+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
62225+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
62226+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
62227+#define GR_STOPMOD_MSG "denied modification of module state by "
62228+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
62229+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
62230+#define GR_IOPERM_MSG "denied use of ioperm() by "
62231+#define GR_IOPL_MSG "denied use of iopl() by "
62232+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
62233+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
62234+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
62235+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
62236+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
62237+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
62238+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
62239+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
62240+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
62241+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
62242+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
62243+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
62244+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
62245+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
62246+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
62247+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
62248+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
62249+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
62250+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
62251+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
62252+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
62253+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
62254+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
62255+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
62256+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
62257+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
62258+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
62259+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
62260+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
62261+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
62262+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
62263+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
62264+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
62265+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
62266+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
62267+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
62268+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
62269+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
62270+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
62271+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
62272+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
62273+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
62274+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
62275+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
62276+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
62277+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
62278+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
62279+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
62280+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
62281+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
62282+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
62283+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
62284+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
62285+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
62286+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
62287+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
62288+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
62289+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
62290+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
62291+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
62292+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
62293+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
62294+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
62295+#define GR_FAILFORK_MSG "failed fork with errno %s by "
62296+#define GR_NICE_CHROOT_MSG "denied priority change by "
62297+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
62298+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
62299+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
62300+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
62301+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
62302+#define GR_TIME_MSG "time set by "
62303+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
62304+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
62305+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
62306+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
62307+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
62308+#define GR_BIND_MSG "denied bind() by "
62309+#define GR_CONNECT_MSG "denied connect() by "
62310+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
62311+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
62312+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
62313+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
62314+#define GR_CAP_ACL_MSG "use of %s denied for "
62315+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
62316+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
62317+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
62318+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
62319+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
62320+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
62321+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
62322+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
62323+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
62324+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
62325+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
62326+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
62327+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
62328+#define GR_VM86_MSG "denied use of vm86 by "
62329+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
62330+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
62331+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
62332+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
62333+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
62334diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
62335new file mode 100644
62336index 0000000..38bfb04
62337--- /dev/null
62338+++ b/include/linux/grsecurity.h
62339@@ -0,0 +1,233 @@
62340+#ifndef GR_SECURITY_H
62341+#define GR_SECURITY_H
62342+#include <linux/fs.h>
62343+#include <linux/fs_struct.h>
62344+#include <linux/binfmts.h>
62345+#include <linux/gracl.h>
62346+
62347+/* notify of brain-dead configs */
62348+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62349+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
62350+#endif
62351+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
62352+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
62353+#endif
62354+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
62355+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
62356+#endif
62357+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
62358+#error "CONFIG_PAX enabled, but no PaX options are enabled."
62359+#endif
62360+
62361+#include <linux/compat.h>
62362+
62363+struct user_arg_ptr {
62364+#ifdef CONFIG_COMPAT
62365+ bool is_compat;
62366+#endif
62367+ union {
62368+ const char __user *const __user *native;
62369+#ifdef CONFIG_COMPAT
62370+ compat_uptr_t __user *compat;
62371+#endif
62372+ } ptr;
62373+};
62374+
62375+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
62376+void gr_handle_brute_check(void);
62377+void gr_handle_kernel_exploit(void);
62378+int gr_process_user_ban(void);
62379+
62380+char gr_roletype_to_char(void);
62381+
62382+int gr_acl_enable_at_secure(void);
62383+
62384+int gr_check_user_change(int real, int effective, int fs);
62385+int gr_check_group_change(int real, int effective, int fs);
62386+
62387+void gr_del_task_from_ip_table(struct task_struct *p);
62388+
62389+int gr_pid_is_chrooted(struct task_struct *p);
62390+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
62391+int gr_handle_chroot_nice(void);
62392+int gr_handle_chroot_sysctl(const int op);
62393+int gr_handle_chroot_setpriority(struct task_struct *p,
62394+ const int niceval);
62395+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
62396+int gr_handle_chroot_chroot(const struct dentry *dentry,
62397+ const struct vfsmount *mnt);
62398+void gr_handle_chroot_chdir(struct path *path);
62399+int gr_handle_chroot_chmod(const struct dentry *dentry,
62400+ const struct vfsmount *mnt, const int mode);
62401+int gr_handle_chroot_mknod(const struct dentry *dentry,
62402+ const struct vfsmount *mnt, const int mode);
62403+int gr_handle_chroot_mount(const struct dentry *dentry,
62404+ const struct vfsmount *mnt,
62405+ const char *dev_name);
62406+int gr_handle_chroot_pivot(void);
62407+int gr_handle_chroot_unix(const pid_t pid);
62408+
62409+int gr_handle_rawio(const struct inode *inode);
62410+
62411+void gr_handle_ioperm(void);
62412+void gr_handle_iopl(void);
62413+
62414+umode_t gr_acl_umask(void);
62415+
62416+int gr_tpe_allow(const struct file *file);
62417+
62418+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
62419+void gr_clear_chroot_entries(struct task_struct *task);
62420+
62421+void gr_log_forkfail(const int retval);
62422+void gr_log_timechange(void);
62423+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
62424+void gr_log_chdir(const struct dentry *dentry,
62425+ const struct vfsmount *mnt);
62426+void gr_log_chroot_exec(const struct dentry *dentry,
62427+ const struct vfsmount *mnt);
62428+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
62429+void gr_log_remount(const char *devname, const int retval);
62430+void gr_log_unmount(const char *devname, const int retval);
62431+void gr_log_mount(const char *from, const char *to, const int retval);
62432+void gr_log_textrel(struct vm_area_struct *vma);
62433+void gr_log_rwxmmap(struct file *file);
62434+void gr_log_rwxmprotect(struct file *file);
62435+
62436+int gr_handle_follow_link(const struct inode *parent,
62437+ const struct inode *inode,
62438+ const struct dentry *dentry,
62439+ const struct vfsmount *mnt);
62440+int gr_handle_fifo(const struct dentry *dentry,
62441+ const struct vfsmount *mnt,
62442+ const struct dentry *dir, const int flag,
62443+ const int acc_mode);
62444+int gr_handle_hardlink(const struct dentry *dentry,
62445+ const struct vfsmount *mnt,
62446+ struct inode *inode,
62447+ const int mode, const char *to);
62448+
62449+int gr_is_capable(const int cap);
62450+int gr_is_capable_nolog(const int cap);
62451+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
62452+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
62453+
62454+void gr_learn_resource(const struct task_struct *task, const int limit,
62455+ const unsigned long wanted, const int gt);
62456+void gr_copy_label(struct task_struct *tsk);
62457+void gr_handle_crash(struct task_struct *task, const int sig);
62458+int gr_handle_signal(const struct task_struct *p, const int sig);
62459+int gr_check_crash_uid(const uid_t uid);
62460+int gr_check_protected_task(const struct task_struct *task);
62461+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
62462+int gr_acl_handle_mmap(const struct file *file,
62463+ const unsigned long prot);
62464+int gr_acl_handle_mprotect(const struct file *file,
62465+ const unsigned long prot);
62466+int gr_check_hidden_task(const struct task_struct *tsk);
62467+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
62468+ const struct vfsmount *mnt);
62469+__u32 gr_acl_handle_utime(const struct dentry *dentry,
62470+ const struct vfsmount *mnt);
62471+__u32 gr_acl_handle_access(const struct dentry *dentry,
62472+ const struct vfsmount *mnt, const int fmode);
62473+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
62474+ const struct vfsmount *mnt, umode_t *mode);
62475+__u32 gr_acl_handle_chown(const struct dentry *dentry,
62476+ const struct vfsmount *mnt);
62477+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
62478+ const struct vfsmount *mnt);
62479+int gr_handle_ptrace(struct task_struct *task, const long request);
62480+int gr_handle_proc_ptrace(struct task_struct *task);
62481+__u32 gr_acl_handle_execve(const struct dentry *dentry,
62482+ const struct vfsmount *mnt);
62483+int gr_check_crash_exec(const struct file *filp);
62484+int gr_acl_is_enabled(void);
62485+void gr_set_kernel_label(struct task_struct *task);
62486+void gr_set_role_label(struct task_struct *task, const uid_t uid,
62487+ const gid_t gid);
62488+int gr_set_proc_label(const struct dentry *dentry,
62489+ const struct vfsmount *mnt,
62490+ const int unsafe_flags);
62491+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
62492+ const struct vfsmount *mnt);
62493+__u32 gr_acl_handle_open(const struct dentry *dentry,
62494+ const struct vfsmount *mnt, int acc_mode);
62495+__u32 gr_acl_handle_creat(const struct dentry *dentry,
62496+ const struct dentry *p_dentry,
62497+ const struct vfsmount *p_mnt,
62498+ int open_flags, int acc_mode, const int imode);
62499+void gr_handle_create(const struct dentry *dentry,
62500+ const struct vfsmount *mnt);
62501+void gr_handle_proc_create(const struct dentry *dentry,
62502+ const struct inode *inode);
62503+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
62504+ const struct dentry *parent_dentry,
62505+ const struct vfsmount *parent_mnt,
62506+ const int mode);
62507+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
62508+ const struct dentry *parent_dentry,
62509+ const struct vfsmount *parent_mnt);
62510+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
62511+ const struct vfsmount *mnt);
62512+void gr_handle_delete(const ino_t ino, const dev_t dev);
62513+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
62514+ const struct vfsmount *mnt);
62515+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
62516+ const struct dentry *parent_dentry,
62517+ const struct vfsmount *parent_mnt,
62518+ const char *from);
62519+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
62520+ const struct dentry *parent_dentry,
62521+ const struct vfsmount *parent_mnt,
62522+ const struct dentry *old_dentry,
62523+ const struct vfsmount *old_mnt, const char *to);
62524+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
62525+int gr_acl_handle_rename(struct dentry *new_dentry,
62526+ struct dentry *parent_dentry,
62527+ const struct vfsmount *parent_mnt,
62528+ struct dentry *old_dentry,
62529+ struct inode *old_parent_inode,
62530+ struct vfsmount *old_mnt, const char *newname);
62531+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62532+ struct dentry *old_dentry,
62533+ struct dentry *new_dentry,
62534+ struct vfsmount *mnt, const __u8 replace);
62535+__u32 gr_check_link(const struct dentry *new_dentry,
62536+ const struct dentry *parent_dentry,
62537+ const struct vfsmount *parent_mnt,
62538+ const struct dentry *old_dentry,
62539+ const struct vfsmount *old_mnt);
62540+int gr_acl_handle_filldir(const struct file *file, const char *name,
62541+ const unsigned int namelen, const ino_t ino);
62542+
62543+__u32 gr_acl_handle_unix(const struct dentry *dentry,
62544+ const struct vfsmount *mnt);
62545+void gr_acl_handle_exit(void);
62546+void gr_acl_handle_psacct(struct task_struct *task, const long code);
62547+int gr_acl_handle_procpidmem(const struct task_struct *task);
62548+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
62549+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
62550+void gr_audit_ptrace(struct task_struct *task);
62551+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
62552+
62553+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
62554+
62555+#ifdef CONFIG_GRKERNSEC
62556+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
62557+void gr_handle_vm86(void);
62558+void gr_handle_mem_readwrite(u64 from, u64 to);
62559+
62560+void gr_log_badprocpid(const char *entry);
62561+
62562+extern int grsec_enable_dmesg;
62563+extern int grsec_disable_privio;
62564+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62565+extern int grsec_enable_chroot_findtask;
62566+#endif
62567+#ifdef CONFIG_GRKERNSEC_SETXID
62568+extern int grsec_enable_setxid;
62569+#endif
62570+#endif
62571+
62572+#endif
62573diff --git a/include/linux/grsock.h b/include/linux/grsock.h
62574new file mode 100644
62575index 0000000..e7ffaaf
62576--- /dev/null
62577+++ b/include/linux/grsock.h
62578@@ -0,0 +1,19 @@
62579+#ifndef __GRSOCK_H
62580+#define __GRSOCK_H
62581+
62582+extern void gr_attach_curr_ip(const struct sock *sk);
62583+extern int gr_handle_sock_all(const int family, const int type,
62584+ const int protocol);
62585+extern int gr_handle_sock_server(const struct sockaddr *sck);
62586+extern int gr_handle_sock_server_other(const struct sock *sck);
62587+extern int gr_handle_sock_client(const struct sockaddr *sck);
62588+extern int gr_search_connect(struct socket * sock,
62589+ struct sockaddr_in * addr);
62590+extern int gr_search_bind(struct socket * sock,
62591+ struct sockaddr_in * addr);
62592+extern int gr_search_listen(struct socket * sock);
62593+extern int gr_search_accept(struct socket * sock);
62594+extern int gr_search_socket(const int domain, const int type,
62595+ const int protocol);
62596+
62597+#endif
62598diff --git a/include/linux/hid.h b/include/linux/hid.h
62599index 449fa38..b37c8cc 100644
62600--- a/include/linux/hid.h
62601+++ b/include/linux/hid.h
62602@@ -704,7 +704,7 @@ struct hid_ll_driver {
62603 unsigned int code, int value);
62604
62605 int (*parse)(struct hid_device *hdev);
62606-};
62607+} __no_const;
62608
62609 #define PM_HINT_FULLON 1<<5
62610 #define PM_HINT_NORMAL 1<<1
62611diff --git a/include/linux/highmem.h b/include/linux/highmem.h
62612index d3999b4..1304cb4 100644
62613--- a/include/linux/highmem.h
62614+++ b/include/linux/highmem.h
62615@@ -221,6 +221,18 @@ static inline void clear_highpage(struct page *page)
62616 kunmap_atomic(kaddr);
62617 }
62618
62619+static inline void sanitize_highpage(struct page *page)
62620+{
62621+ void *kaddr;
62622+ unsigned long flags;
62623+
62624+ local_irq_save(flags);
62625+ kaddr = kmap_atomic(page);
62626+ clear_page(kaddr);
62627+ kunmap_atomic(kaddr);
62628+ local_irq_restore(flags);
62629+}
62630+
62631 static inline void zero_user_segments(struct page *page,
62632 unsigned start1, unsigned end1,
62633 unsigned start2, unsigned end2)
62634diff --git a/include/linux/i2c.h b/include/linux/i2c.h
62635index ddfa041..a44cfff 100644
62636--- a/include/linux/i2c.h
62637+++ b/include/linux/i2c.h
62638@@ -366,6 +366,7 @@ struct i2c_algorithm {
62639 /* To determine what the adapter supports */
62640 u32 (*functionality) (struct i2c_adapter *);
62641 };
62642+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
62643
62644 /*
62645 * i2c_adapter is the structure used to identify a physical i2c bus along
62646diff --git a/include/linux/i2o.h b/include/linux/i2o.h
62647index d23c3c2..eb63c81 100644
62648--- a/include/linux/i2o.h
62649+++ b/include/linux/i2o.h
62650@@ -565,7 +565,7 @@ struct i2o_controller {
62651 struct i2o_device *exec; /* Executive */
62652 #if BITS_PER_LONG == 64
62653 spinlock_t context_list_lock; /* lock for context_list */
62654- atomic_t context_list_counter; /* needed for unique contexts */
62655+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
62656 struct list_head context_list; /* list of context id's
62657 and pointers */
62658 #endif
62659diff --git a/include/linux/if_team.h b/include/linux/if_team.h
62660index 8185f57..7b2d222 100644
62661--- a/include/linux/if_team.h
62662+++ b/include/linux/if_team.h
62663@@ -74,6 +74,7 @@ struct team_mode_ops {
62664 void (*port_leave)(struct team *team, struct team_port *port);
62665 void (*port_change_mac)(struct team *team, struct team_port *port);
62666 };
62667+typedef struct team_mode_ops __no_const team_mode_ops_no_const;
62668
62669 enum team_option_type {
62670 TEAM_OPTION_TYPE_U32,
62671@@ -136,7 +137,7 @@ struct team {
62672 struct list_head option_inst_list; /* list of option instances */
62673
62674 const struct team_mode *mode;
62675- struct team_mode_ops ops;
62676+ team_mode_ops_no_const ops;
62677 long mode_priv[TEAM_MODE_PRIV_LONGS];
62678 };
62679
62680diff --git a/include/linux/init.h b/include/linux/init.h
62681index 6b95109..7616d09 100644
62682--- a/include/linux/init.h
62683+++ b/include/linux/init.h
62684@@ -39,9 +39,15 @@
62685 * Also note, that this data cannot be "const".
62686 */
62687
62688+#ifdef MODULE
62689+#define add_latent_entropy
62690+#else
62691+#define add_latent_entropy __latent_entropy
62692+#endif
62693+
62694 /* These are for everybody (although not all archs will actually
62695 discard it in modules) */
62696-#define __init __section(.init.text) __cold notrace
62697+#define __init __section(.init.text) __cold notrace add_latent_entropy
62698 #define __initdata __section(.init.data)
62699 #define __initconst __section(.init.rodata)
62700 #define __exitdata __section(.exit.data)
62701@@ -83,7 +89,7 @@
62702 #define __exit __section(.exit.text) __exitused __cold notrace
62703
62704 /* Used for HOTPLUG */
62705-#define __devinit __section(.devinit.text) __cold notrace
62706+#define __devinit __section(.devinit.text) __cold notrace add_latent_entropy
62707 #define __devinitdata __section(.devinit.data)
62708 #define __devinitconst __section(.devinit.rodata)
62709 #define __devexit __section(.devexit.text) __exitused __cold notrace
62710@@ -91,7 +97,7 @@
62711 #define __devexitconst __section(.devexit.rodata)
62712
62713 /* Used for HOTPLUG_CPU */
62714-#define __cpuinit __section(.cpuinit.text) __cold notrace
62715+#define __cpuinit __section(.cpuinit.text) __cold notrace add_latent_entropy
62716 #define __cpuinitdata __section(.cpuinit.data)
62717 #define __cpuinitconst __section(.cpuinit.rodata)
62718 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
62719@@ -99,7 +105,7 @@
62720 #define __cpuexitconst __section(.cpuexit.rodata)
62721
62722 /* Used for MEMORY_HOTPLUG */
62723-#define __meminit __section(.meminit.text) __cold notrace
62724+#define __meminit __section(.meminit.text) __cold notrace add_latent_entropy
62725 #define __meminitdata __section(.meminit.data)
62726 #define __meminitconst __section(.meminit.rodata)
62727 #define __memexit __section(.memexit.text) __exitused __cold notrace
62728diff --git a/include/linux/init_task.h b/include/linux/init_task.h
62729index 9e65eff..b131e8b 100644
62730--- a/include/linux/init_task.h
62731+++ b/include/linux/init_task.h
62732@@ -134,6 +134,12 @@ extern struct cred init_cred;
62733
62734 #define INIT_TASK_COMM "swapper"
62735
62736+#ifdef CONFIG_X86
62737+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
62738+#else
62739+#define INIT_TASK_THREAD_INFO
62740+#endif
62741+
62742 /*
62743 * INIT_TASK is used to set up the first task table, touch at
62744 * your own risk!. Base=0, limit=0x1fffff (=2MB)
62745@@ -172,6 +178,7 @@ extern struct cred init_cred;
62746 RCU_INIT_POINTER(.cred, &init_cred), \
62747 .comm = INIT_TASK_COMM, \
62748 .thread = INIT_THREAD, \
62749+ INIT_TASK_THREAD_INFO \
62750 .fs = &init_fs, \
62751 .files = &init_files, \
62752 .signal = &init_signals, \
62753diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
62754index e6ca56d..8583707 100644
62755--- a/include/linux/intel-iommu.h
62756+++ b/include/linux/intel-iommu.h
62757@@ -296,7 +296,7 @@ struct iommu_flush {
62758 u8 fm, u64 type);
62759 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
62760 unsigned int size_order, u64 type);
62761-};
62762+} __no_const;
62763
62764 enum {
62765 SR_DMAR_FECTL_REG,
62766diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
62767index e68a8e5..811b9af 100644
62768--- a/include/linux/interrupt.h
62769+++ b/include/linux/interrupt.h
62770@@ -435,7 +435,7 @@ enum
62771 /* map softirq index to softirq name. update 'softirq_to_name' in
62772 * kernel/softirq.c when adding a new softirq.
62773 */
62774-extern char *softirq_to_name[NR_SOFTIRQS];
62775+extern const char * const softirq_to_name[NR_SOFTIRQS];
62776
62777 /* softirq mask and active fields moved to irq_cpustat_t in
62778 * asm/hardirq.h to get better cache usage. KAO
62779@@ -443,12 +443,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
62780
62781 struct softirq_action
62782 {
62783- void (*action)(struct softirq_action *);
62784+ void (*action)(void);
62785 };
62786
62787 asmlinkage void do_softirq(void);
62788 asmlinkage void __do_softirq(void);
62789-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
62790+extern void open_softirq(int nr, void (*action)(void));
62791 extern void softirq_init(void);
62792 extern void __raise_softirq_irqoff(unsigned int nr);
62793
62794diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
62795index 6883e19..06992b1 100644
62796--- a/include/linux/kallsyms.h
62797+++ b/include/linux/kallsyms.h
62798@@ -15,7 +15,8 @@
62799
62800 struct module;
62801
62802-#ifdef CONFIG_KALLSYMS
62803+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
62804+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62805 /* Lookup the address for a symbol. Returns 0 if not found. */
62806 unsigned long kallsyms_lookup_name(const char *name);
62807
62808@@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
62809 /* Stupid that this does nothing, but I didn't create this mess. */
62810 #define __print_symbol(fmt, addr)
62811 #endif /*CONFIG_KALLSYMS*/
62812+#else /* when included by kallsyms.c, vsnprintf.c, or
62813+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
62814+extern void __print_symbol(const char *fmt, unsigned long address);
62815+extern int sprint_backtrace(char *buffer, unsigned long address);
62816+extern int sprint_symbol(char *buffer, unsigned long address);
62817+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
62818+const char *kallsyms_lookup(unsigned long addr,
62819+ unsigned long *symbolsize,
62820+ unsigned long *offset,
62821+ char **modname, char *namebuf);
62822+#endif
62823
62824 /* This macro allows us to keep printk typechecking */
62825 static __printf(1, 2)
62826diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
62827index c4d2fc1..5df9c19 100644
62828--- a/include/linux/kgdb.h
62829+++ b/include/linux/kgdb.h
62830@@ -53,7 +53,7 @@ extern int kgdb_connected;
62831 extern int kgdb_io_module_registered;
62832
62833 extern atomic_t kgdb_setting_breakpoint;
62834-extern atomic_t kgdb_cpu_doing_single_step;
62835+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
62836
62837 extern struct task_struct *kgdb_usethread;
62838 extern struct task_struct *kgdb_contthread;
62839@@ -252,7 +252,7 @@ struct kgdb_arch {
62840 void (*disable_hw_break)(struct pt_regs *regs);
62841 void (*remove_all_hw_break)(void);
62842 void (*correct_hw_break)(void);
62843-};
62844+} __do_const;
62845
62846 /**
62847 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
62848@@ -277,7 +277,7 @@ struct kgdb_io {
62849 void (*pre_exception) (void);
62850 void (*post_exception) (void);
62851 int is_console;
62852-};
62853+} __do_const;
62854
62855 extern struct kgdb_arch arch_kgdb_ops;
62856
62857diff --git a/include/linux/kmod.h b/include/linux/kmod.h
62858index 5398d58..5883a34 100644
62859--- a/include/linux/kmod.h
62860+++ b/include/linux/kmod.h
62861@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
62862 * usually useless though. */
62863 extern __printf(2, 3)
62864 int __request_module(bool wait, const char *name, ...);
62865+extern __printf(3, 4)
62866+int ___request_module(bool wait, char *param_name, const char *name, ...);
62867 #define request_module(mod...) __request_module(true, mod)
62868 #define request_module_nowait(mod...) __request_module(false, mod)
62869 #define try_then_request_module(x, mod...) \
62870diff --git a/include/linux/kobject.h b/include/linux/kobject.h
62871index fc615a9..1e57449 100644
62872--- a/include/linux/kobject.h
62873+++ b/include/linux/kobject.h
62874@@ -224,7 +224,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
62875
62876 static inline __printf(2, 3)
62877 int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
62878-{ return 0; }
62879+{ return -ENOMEM; }
62880
62881 static inline int kobject_action_type(const char *buf, size_t count,
62882 enum kobject_action *type)
62883diff --git a/include/linux/kref.h b/include/linux/kref.h
62884index 9c07dce..a92fa71 100644
62885--- a/include/linux/kref.h
62886+++ b/include/linux/kref.h
62887@@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
62888 static inline int kref_sub(struct kref *kref, unsigned int count,
62889 void (*release)(struct kref *kref))
62890 {
62891- WARN_ON(release == NULL);
62892+ BUG_ON(release == NULL);
62893
62894 if (atomic_sub_and_test((int) count, &kref->refcount)) {
62895 release(kref);
62896diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
62897index 96c158a..1864db5 100644
62898--- a/include/linux/kvm_host.h
62899+++ b/include/linux/kvm_host.h
62900@@ -345,7 +345,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
62901 void vcpu_load(struct kvm_vcpu *vcpu);
62902 void vcpu_put(struct kvm_vcpu *vcpu);
62903
62904-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62905+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62906 struct module *module);
62907 void kvm_exit(void);
62908
62909@@ -511,7 +511,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
62910 struct kvm_guest_debug *dbg);
62911 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
62912
62913-int kvm_arch_init(void *opaque);
62914+int kvm_arch_init(const void *opaque);
62915 void kvm_arch_exit(void);
62916
62917 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
62918diff --git a/include/linux/libata.h b/include/linux/libata.h
62919index 6e887c7..4539601 100644
62920--- a/include/linux/libata.h
62921+++ b/include/linux/libata.h
62922@@ -910,7 +910,7 @@ struct ata_port_operations {
62923 * fields must be pointers.
62924 */
62925 const struct ata_port_operations *inherits;
62926-};
62927+} __do_const;
62928
62929 struct ata_port_info {
62930 unsigned long flags;
62931diff --git a/include/linux/memory.h b/include/linux/memory.h
62932index 1ac7f6e..a5794d0 100644
62933--- a/include/linux/memory.h
62934+++ b/include/linux/memory.h
62935@@ -143,7 +143,7 @@ struct memory_accessor {
62936 size_t count);
62937 ssize_t (*write)(struct memory_accessor *, const char *buf,
62938 off_t offset, size_t count);
62939-};
62940+} __no_const;
62941
62942 /*
62943 * Kernel text modification mutex, used for code patching. Users of this lock
62944diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
62945index 1318ca6..7521340 100644
62946--- a/include/linux/mfd/abx500.h
62947+++ b/include/linux/mfd/abx500.h
62948@@ -452,6 +452,7 @@ struct abx500_ops {
62949 int (*event_registers_startup_state_get) (struct device *, u8 *);
62950 int (*startup_irq_enabled) (struct device *, unsigned int);
62951 };
62952+typedef struct abx500_ops __no_const abx500_ops_no_const;
62953
62954 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
62955 void abx500_remove_ops(struct device *dev);
62956diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
62957index 9b07725..3d55001 100644
62958--- a/include/linux/mfd/abx500/ux500_chargalg.h
62959+++ b/include/linux/mfd/abx500/ux500_chargalg.h
62960@@ -19,7 +19,7 @@ struct ux500_charger_ops {
62961 int (*enable) (struct ux500_charger *, int, int, int);
62962 int (*kick_wd) (struct ux500_charger *);
62963 int (*update_curr) (struct ux500_charger *, int);
62964-};
62965+} __no_const;
62966
62967 /**
62968 * struct ux500_charger - power supply ux500 charger sub class
62969diff --git a/include/linux/mm.h b/include/linux/mm.h
62970index f9f279c..198da78 100644
62971--- a/include/linux/mm.h
62972+++ b/include/linux/mm.h
62973@@ -116,7 +116,14 @@ extern unsigned int kobjsize(const void *objp);
62974
62975 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
62976 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
62977+
62978+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62979+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
62980+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
62981+#else
62982 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
62983+#endif
62984+
62985 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
62986 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
62987
62988@@ -1009,34 +1016,6 @@ int set_page_dirty(struct page *page);
62989 int set_page_dirty_lock(struct page *page);
62990 int clear_page_dirty_for_io(struct page *page);
62991
62992-/* Is the vma a continuation of the stack vma above it? */
62993-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
62994-{
62995- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
62996-}
62997-
62998-static inline int stack_guard_page_start(struct vm_area_struct *vma,
62999- unsigned long addr)
63000-{
63001- return (vma->vm_flags & VM_GROWSDOWN) &&
63002- (vma->vm_start == addr) &&
63003- !vma_growsdown(vma->vm_prev, addr);
63004-}
63005-
63006-/* Is the vma a continuation of the stack vma below it? */
63007-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
63008-{
63009- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
63010-}
63011-
63012-static inline int stack_guard_page_end(struct vm_area_struct *vma,
63013- unsigned long addr)
63014-{
63015- return (vma->vm_flags & VM_GROWSUP) &&
63016- (vma->vm_end == addr) &&
63017- !vma_growsup(vma->vm_next, addr);
63018-}
63019-
63020 extern pid_t
63021 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
63022
63023@@ -1135,6 +1114,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
63024 }
63025 #endif
63026
63027+#ifdef CONFIG_MMU
63028+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
63029+#else
63030+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
63031+{
63032+ return __pgprot(0);
63033+}
63034+#endif
63035+
63036 int vma_wants_writenotify(struct vm_area_struct *vma);
63037
63038 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
63039@@ -1153,8 +1141,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
63040 {
63041 return 0;
63042 }
63043+
63044+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
63045+ unsigned long address)
63046+{
63047+ return 0;
63048+}
63049 #else
63050 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
63051+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
63052 #endif
63053
63054 #ifdef __PAGETABLE_PMD_FOLDED
63055@@ -1163,8 +1158,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
63056 {
63057 return 0;
63058 }
63059+
63060+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
63061+ unsigned long address)
63062+{
63063+ return 0;
63064+}
63065 #else
63066 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
63067+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
63068 #endif
63069
63070 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
63071@@ -1182,11 +1184,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
63072 NULL: pud_offset(pgd, address);
63073 }
63074
63075+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
63076+{
63077+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
63078+ NULL: pud_offset(pgd, address);
63079+}
63080+
63081 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
63082 {
63083 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
63084 NULL: pmd_offset(pud, address);
63085 }
63086+
63087+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
63088+{
63089+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
63090+ NULL: pmd_offset(pud, address);
63091+}
63092 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
63093
63094 #if USE_SPLIT_PTLOCKS
63095@@ -1396,6 +1410,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
63096 unsigned long, unsigned long,
63097 unsigned long, unsigned long);
63098 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
63099+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
63100
63101 /* These take the mm semaphore themselves */
63102 extern unsigned long vm_brk(unsigned long, unsigned long);
63103@@ -1458,6 +1473,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
63104 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
63105 struct vm_area_struct **pprev);
63106
63107+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
63108+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
63109+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
63110+
63111 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
63112 NULL if none. Assume start_addr < end_addr. */
63113 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
63114@@ -1486,15 +1505,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
63115 return vma;
63116 }
63117
63118-#ifdef CONFIG_MMU
63119-pgprot_t vm_get_page_prot(unsigned long vm_flags);
63120-#else
63121-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
63122-{
63123- return __pgprot(0);
63124-}
63125-#endif
63126-
63127 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
63128 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
63129 unsigned long pfn, unsigned long size, pgprot_t);
63130@@ -1599,7 +1609,7 @@ extern int unpoison_memory(unsigned long pfn);
63131 extern int sysctl_memory_failure_early_kill;
63132 extern int sysctl_memory_failure_recovery;
63133 extern void shake_page(struct page *p, int access);
63134-extern atomic_long_t mce_bad_pages;
63135+extern atomic_long_unchecked_t mce_bad_pages;
63136 extern int soft_offline_page(struct page *page, int flags);
63137
63138 extern void dump_page(struct page *page);
63139@@ -1630,5 +1640,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
63140 static inline bool page_is_guard(struct page *page) { return false; }
63141 #endif /* CONFIG_DEBUG_PAGEALLOC */
63142
63143+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63144+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
63145+#else
63146+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
63147+#endif
63148+
63149 #endif /* __KERNEL__ */
63150 #endif /* _LINUX_MM_H */
63151diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
63152index 704a626..bb0705a 100644
63153--- a/include/linux/mm_types.h
63154+++ b/include/linux/mm_types.h
63155@@ -263,6 +263,8 @@ struct vm_area_struct {
63156 #ifdef CONFIG_NUMA
63157 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
63158 #endif
63159+
63160+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
63161 };
63162
63163 struct core_thread {
63164@@ -337,7 +339,7 @@ struct mm_struct {
63165 unsigned long def_flags;
63166 unsigned long nr_ptes; /* Page table pages */
63167 unsigned long start_code, end_code, start_data, end_data;
63168- unsigned long start_brk, brk, start_stack;
63169+ unsigned long brk_gap, start_brk, brk, start_stack;
63170 unsigned long arg_start, arg_end, env_start, env_end;
63171
63172 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
63173@@ -389,6 +391,24 @@ struct mm_struct {
63174 struct cpumask cpumask_allocation;
63175 #endif
63176 struct uprobes_state uprobes_state;
63177+
63178+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63179+ unsigned long pax_flags;
63180+#endif
63181+
63182+#ifdef CONFIG_PAX_DLRESOLVE
63183+ unsigned long call_dl_resolve;
63184+#endif
63185+
63186+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
63187+ unsigned long call_syscall;
63188+#endif
63189+
63190+#ifdef CONFIG_PAX_ASLR
63191+ unsigned long delta_mmap; /* randomized offset */
63192+ unsigned long delta_stack; /* randomized offset */
63193+#endif
63194+
63195 };
63196
63197 static inline void mm_init_cpumask(struct mm_struct *mm)
63198diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
63199index 1d1b1e1..2a13c78 100644
63200--- a/include/linux/mmu_notifier.h
63201+++ b/include/linux/mmu_notifier.h
63202@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
63203 */
63204 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
63205 ({ \
63206- pte_t __pte; \
63207+ pte_t ___pte; \
63208 struct vm_area_struct *___vma = __vma; \
63209 unsigned long ___address = __address; \
63210- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
63211+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
63212 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
63213- __pte; \
63214+ ___pte; \
63215 })
63216
63217 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
63218diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
63219index 68c569f..5f43753 100644
63220--- a/include/linux/mmzone.h
63221+++ b/include/linux/mmzone.h
63222@@ -411,7 +411,7 @@ struct zone {
63223 unsigned long flags; /* zone flags, see below */
63224
63225 /* Zone statistics */
63226- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63227+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63228
63229 /*
63230 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
63231diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
63232index 5db9382..50e801d 100644
63233--- a/include/linux/mod_devicetable.h
63234+++ b/include/linux/mod_devicetable.h
63235@@ -12,7 +12,7 @@
63236 typedef unsigned long kernel_ulong_t;
63237 #endif
63238
63239-#define PCI_ANY_ID (~0)
63240+#define PCI_ANY_ID ((__u16)~0)
63241
63242 struct pci_device_id {
63243 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
63244@@ -131,7 +131,7 @@ struct usb_device_id {
63245 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
63246 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
63247
63248-#define HID_ANY_ID (~0)
63249+#define HID_ANY_ID (~0U)
63250 #define HID_BUS_ANY 0xffff
63251 #define HID_GROUP_ANY 0x0000
63252
63253diff --git a/include/linux/module.h b/include/linux/module.h
63254index fbcafe2..e5d9587 100644
63255--- a/include/linux/module.h
63256+++ b/include/linux/module.h
63257@@ -17,6 +17,7 @@
63258 #include <linux/moduleparam.h>
63259 #include <linux/tracepoint.h>
63260 #include <linux/export.h>
63261+#include <linux/fs.h>
63262
63263 #include <linux/percpu.h>
63264 #include <asm/module.h>
63265@@ -273,19 +274,16 @@ struct module
63266 int (*init)(void);
63267
63268 /* If this is non-NULL, vfree after init() returns */
63269- void *module_init;
63270+ void *module_init_rx, *module_init_rw;
63271
63272 /* Here is the actual code + data, vfree'd on unload. */
63273- void *module_core;
63274+ void *module_core_rx, *module_core_rw;
63275
63276 /* Here are the sizes of the init and core sections */
63277- unsigned int init_size, core_size;
63278+ unsigned int init_size_rw, core_size_rw;
63279
63280 /* The size of the executable code in each section. */
63281- unsigned int init_text_size, core_text_size;
63282-
63283- /* Size of RO sections of the module (text+rodata) */
63284- unsigned int init_ro_size, core_ro_size;
63285+ unsigned int init_size_rx, core_size_rx;
63286
63287 /* Arch-specific module values */
63288 struct mod_arch_specific arch;
63289@@ -341,6 +339,10 @@ struct module
63290 #ifdef CONFIG_EVENT_TRACING
63291 struct ftrace_event_call **trace_events;
63292 unsigned int num_trace_events;
63293+ struct file_operations trace_id;
63294+ struct file_operations trace_enable;
63295+ struct file_operations trace_format;
63296+ struct file_operations trace_filter;
63297 #endif
63298 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
63299 unsigned int num_ftrace_callsites;
63300@@ -388,16 +390,46 @@ bool is_module_address(unsigned long addr);
63301 bool is_module_percpu_address(unsigned long addr);
63302 bool is_module_text_address(unsigned long addr);
63303
63304+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
63305+{
63306+
63307+#ifdef CONFIG_PAX_KERNEXEC
63308+ if (ktla_ktva(addr) >= (unsigned long)start &&
63309+ ktla_ktva(addr) < (unsigned long)start + size)
63310+ return 1;
63311+#endif
63312+
63313+ return ((void *)addr >= start && (void *)addr < start + size);
63314+}
63315+
63316+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
63317+{
63318+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
63319+}
63320+
63321+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
63322+{
63323+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
63324+}
63325+
63326+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
63327+{
63328+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
63329+}
63330+
63331+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
63332+{
63333+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
63334+}
63335+
63336 static inline int within_module_core(unsigned long addr, struct module *mod)
63337 {
63338- return (unsigned long)mod->module_core <= addr &&
63339- addr < (unsigned long)mod->module_core + mod->core_size;
63340+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
63341 }
63342
63343 static inline int within_module_init(unsigned long addr, struct module *mod)
63344 {
63345- return (unsigned long)mod->module_init <= addr &&
63346- addr < (unsigned long)mod->module_init + mod->init_size;
63347+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
63348 }
63349
63350 /* Search for module by name: must hold module_mutex. */
63351diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
63352index b2be02e..72d2f78 100644
63353--- a/include/linux/moduleloader.h
63354+++ b/include/linux/moduleloader.h
63355@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
63356
63357 /* Allocator used for allocating struct module, core sections and init
63358 sections. Returns NULL on failure. */
63359-void *module_alloc(unsigned long size);
63360+void *module_alloc(unsigned long size) __size_overflow(1);
63361+
63362+#ifdef CONFIG_PAX_KERNEXEC
63363+void *module_alloc_exec(unsigned long size) __size_overflow(1);
63364+#else
63365+#define module_alloc_exec(x) module_alloc(x)
63366+#endif
63367
63368 /* Free memory returned from module_alloc. */
63369 void module_free(struct module *mod, void *module_region);
63370
63371+#ifdef CONFIG_PAX_KERNEXEC
63372+void module_free_exec(struct module *mod, void *module_region);
63373+#else
63374+#define module_free_exec(x, y) module_free((x), (y))
63375+#endif
63376+
63377 /* Apply the given relocation to the (simplified) ELF. Return -error
63378 or 0. */
63379 int apply_relocate(Elf_Shdr *sechdrs,
63380diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
63381index d6a5806..7c13347 100644
63382--- a/include/linux/moduleparam.h
63383+++ b/include/linux/moduleparam.h
63384@@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
63385 * @len is usually just sizeof(string).
63386 */
63387 #define module_param_string(name, string, len, perm) \
63388- static const struct kparam_string __param_string_##name \
63389+ static const struct kparam_string __param_string_##name __used \
63390 = { len, string }; \
63391 __module_param_call(MODULE_PARAM_PREFIX, name, \
63392 &param_ops_string, \
63393@@ -425,7 +425,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
63394 */
63395 #define module_param_array_named(name, array, type, nump, perm) \
63396 param_check_##type(name, &(array)[0]); \
63397- static const struct kparam_array __param_arr_##name \
63398+ static const struct kparam_array __param_arr_##name __used \
63399 = { .max = ARRAY_SIZE(array), .num = nump, \
63400 .ops = &param_ops_##type, \
63401 .elemsize = sizeof(array[0]), .elem = array }; \
63402diff --git a/include/linux/namei.h b/include/linux/namei.h
63403index ffc0213..2c1f2cb 100644
63404--- a/include/linux/namei.h
63405+++ b/include/linux/namei.h
63406@@ -24,7 +24,7 @@ struct nameidata {
63407 unsigned seq;
63408 int last_type;
63409 unsigned depth;
63410- char *saved_names[MAX_NESTED_LINKS + 1];
63411+ const char *saved_names[MAX_NESTED_LINKS + 1];
63412
63413 /* Intent data */
63414 union {
63415@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
63416 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
63417 extern void unlock_rename(struct dentry *, struct dentry *);
63418
63419-static inline void nd_set_link(struct nameidata *nd, char *path)
63420+static inline void nd_set_link(struct nameidata *nd, const char *path)
63421 {
63422 nd->saved_names[nd->depth] = path;
63423 }
63424
63425-static inline char *nd_get_link(struct nameidata *nd)
63426+static inline const char *nd_get_link(const struct nameidata *nd)
63427 {
63428 return nd->saved_names[nd->depth];
63429 }
63430diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
63431index d94cb14..e64c951 100644
63432--- a/include/linux/netdevice.h
63433+++ b/include/linux/netdevice.h
63434@@ -1026,6 +1026,7 @@ struct net_device_ops {
63435 struct net_device *dev,
63436 int idx);
63437 };
63438+typedef struct net_device_ops __no_const net_device_ops_no_const;
63439
63440 /*
63441 * The DEVICE structure.
63442@@ -1087,7 +1088,7 @@ struct net_device {
63443 int iflink;
63444
63445 struct net_device_stats stats;
63446- atomic_long_t rx_dropped; /* dropped packets by core network
63447+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
63448 * Do not use this in drivers.
63449 */
63450
63451diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
63452new file mode 100644
63453index 0000000..33f4af8
63454--- /dev/null
63455+++ b/include/linux/netfilter/xt_gradm.h
63456@@ -0,0 +1,9 @@
63457+#ifndef _LINUX_NETFILTER_XT_GRADM_H
63458+#define _LINUX_NETFILTER_XT_GRADM_H 1
63459+
63460+struct xt_gradm_mtinfo {
63461+ __u16 flags;
63462+ __u16 invflags;
63463+};
63464+
63465+#endif
63466diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
63467index c65a18a..0c05f3a 100644
63468--- a/include/linux/of_pdt.h
63469+++ b/include/linux/of_pdt.h
63470@@ -32,7 +32,7 @@ struct of_pdt_ops {
63471
63472 /* return 0 on success; fill in 'len' with number of bytes in path */
63473 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
63474-};
63475+} __no_const;
63476
63477 extern void *prom_early_alloc(unsigned long size);
63478
63479diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
63480index a4c5624..79d6d88 100644
63481--- a/include/linux/oprofile.h
63482+++ b/include/linux/oprofile.h
63483@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
63484 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
63485 char const * name, ulong * val);
63486
63487-/** Create a file for read-only access to an atomic_t. */
63488+/** Create a file for read-only access to an atomic_unchecked_t. */
63489 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
63490- char const * name, atomic_t * val);
63491+ char const * name, atomic_unchecked_t * val);
63492
63493 /** create a directory */
63494 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
63495diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
63496index 45db49f..8795db3 100644
63497--- a/include/linux/perf_event.h
63498+++ b/include/linux/perf_event.h
63499@@ -879,8 +879,8 @@ struct perf_event {
63500
63501 enum perf_event_active_state state;
63502 unsigned int attach_state;
63503- local64_t count;
63504- atomic64_t child_count;
63505+ local64_t count; /* PaX: fix it one day */
63506+ atomic64_unchecked_t child_count;
63507
63508 /*
63509 * These are the total time in nanoseconds that the event
63510@@ -925,14 +925,14 @@ struct perf_event {
63511 struct hw_perf_event hw;
63512
63513 struct perf_event_context *ctx;
63514- struct file *filp;
63515+ atomic_long_t refcount;
63516
63517 /*
63518 * These accumulate total time (in nanoseconds) that children
63519 * events have been enabled and running, respectively.
63520 */
63521- atomic64_t child_total_time_enabled;
63522- atomic64_t child_total_time_running;
63523+ atomic64_unchecked_t child_total_time_enabled;
63524+ atomic64_unchecked_t child_total_time_running;
63525
63526 /*
63527 * Protect attach/detach and child_list:
63528diff --git a/include/linux/personality.h b/include/linux/personality.h
63529index 8fc7dd1a..c19d89e 100644
63530--- a/include/linux/personality.h
63531+++ b/include/linux/personality.h
63532@@ -44,6 +44,7 @@ enum {
63533 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
63534 ADDR_NO_RANDOMIZE | \
63535 ADDR_COMPAT_LAYOUT | \
63536+ ADDR_LIMIT_3GB | \
63537 MMAP_PAGE_ZERO)
63538
63539 /*
63540diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
63541index e1ac1ce..0675fed 100644
63542--- a/include/linux/pipe_fs_i.h
63543+++ b/include/linux/pipe_fs_i.h
63544@@ -45,9 +45,9 @@ struct pipe_buffer {
63545 struct pipe_inode_info {
63546 wait_queue_head_t wait;
63547 unsigned int nrbufs, curbuf, buffers;
63548- unsigned int readers;
63549- unsigned int writers;
63550- unsigned int waiting_writers;
63551+ atomic_t readers;
63552+ atomic_t writers;
63553+ atomic_t waiting_writers;
63554 unsigned int r_counter;
63555 unsigned int w_counter;
63556 struct page *tmp_page;
63557diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
63558index f271860..6b3bec5 100644
63559--- a/include/linux/pm_runtime.h
63560+++ b/include/linux/pm_runtime.h
63561@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
63562
63563 static inline void pm_runtime_mark_last_busy(struct device *dev)
63564 {
63565- ACCESS_ONCE(dev->power.last_busy) = jiffies;
63566+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
63567 }
63568
63569 #else /* !CONFIG_PM_RUNTIME */
63570diff --git a/include/linux/poison.h b/include/linux/poison.h
63571index 2110a81..13a11bb 100644
63572--- a/include/linux/poison.h
63573+++ b/include/linux/poison.h
63574@@ -19,8 +19,8 @@
63575 * under normal circumstances, used to verify that nobody uses
63576 * non-initialized list entries.
63577 */
63578-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
63579-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
63580+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
63581+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
63582
63583 /********** include/linux/timer.h **********/
63584 /*
63585diff --git a/include/linux/preempt.h b/include/linux/preempt.h
63586index 5a710b9..0b0dab9 100644
63587--- a/include/linux/preempt.h
63588+++ b/include/linux/preempt.h
63589@@ -126,7 +126,7 @@ struct preempt_ops {
63590 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
63591 void (*sched_out)(struct preempt_notifier *notifier,
63592 struct task_struct *next);
63593-};
63594+} __no_const;
63595
63596 /**
63597 * preempt_notifier - key for installing preemption notifiers
63598diff --git a/include/linux/printk.h b/include/linux/printk.h
63599index 1bec2f7..b66e833 100644
63600--- a/include/linux/printk.h
63601+++ b/include/linux/printk.h
63602@@ -94,6 +94,8 @@ void early_printk(const char *fmt, ...);
63603 extern int printk_needs_cpu(int cpu);
63604 extern void printk_tick(void);
63605
63606+extern int kptr_restrict;
63607+
63608 #ifdef CONFIG_PRINTK
63609 asmlinkage __printf(5, 0)
63610 int vprintk_emit(int facility, int level,
63611@@ -128,7 +130,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
63612
63613 extern int printk_delay_msec;
63614 extern int dmesg_restrict;
63615-extern int kptr_restrict;
63616
63617 void log_buf_kexec_setup(void);
63618 void __init setup_log_buf(int early);
63619diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
63620index 3fd2e87..d93a721 100644
63621--- a/include/linux/proc_fs.h
63622+++ b/include/linux/proc_fs.h
63623@@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
63624 return proc_create_data(name, mode, parent, proc_fops, NULL);
63625 }
63626
63627+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
63628+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
63629+{
63630+#ifdef CONFIG_GRKERNSEC_PROC_USER
63631+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
63632+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63633+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
63634+#else
63635+ return proc_create_data(name, mode, parent, proc_fops, NULL);
63636+#endif
63637+}
63638+
63639 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
63640 umode_t mode, struct proc_dir_entry *base,
63641 read_proc_t *read_proc, void * data)
63642@@ -258,7 +270,7 @@ union proc_op {
63643 int (*proc_show)(struct seq_file *m,
63644 struct pid_namespace *ns, struct pid *pid,
63645 struct task_struct *task);
63646-};
63647+} __no_const;
63648
63649 struct ctl_table_header;
63650 struct ctl_table;
63651diff --git a/include/linux/random.h b/include/linux/random.h
63652index ac621ce..c1215f3 100644
63653--- a/include/linux/random.h
63654+++ b/include/linux/random.h
63655@@ -53,6 +53,10 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
63656 unsigned int value);
63657 extern void add_interrupt_randomness(int irq, int irq_flags);
63658
63659+#ifdef CONFIG_PAX_LATENT_ENTROPY
63660+extern void transfer_latent_entropy(void);
63661+#endif
63662+
63663 extern void get_random_bytes(void *buf, int nbytes);
63664 extern void get_random_bytes_arch(void *buf, int nbytes);
63665 void generate_random_uuid(unsigned char uuid_out[16]);
63666@@ -69,12 +73,17 @@ void srandom32(u32 seed);
63667
63668 u32 prandom32(struct rnd_state *);
63669
63670+static inline unsigned long pax_get_random_long(void)
63671+{
63672+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
63673+}
63674+
63675 /*
63676 * Handle minimum values for seeds
63677 */
63678 static inline u32 __seed(u32 x, u32 m)
63679 {
63680- return (x < m) ? x + m : x;
63681+ return (x <= m) ? x + m + 1 : x;
63682 }
63683
63684 /**
63685diff --git a/include/linux/reboot.h b/include/linux/reboot.h
63686index e0879a7..a12f962 100644
63687--- a/include/linux/reboot.h
63688+++ b/include/linux/reboot.h
63689@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
63690 * Architecture-specific implementations of sys_reboot commands.
63691 */
63692
63693-extern void machine_restart(char *cmd);
63694-extern void machine_halt(void);
63695-extern void machine_power_off(void);
63696+extern void machine_restart(char *cmd) __noreturn;
63697+extern void machine_halt(void) __noreturn;
63698+extern void machine_power_off(void) __noreturn;
63699
63700 extern void machine_shutdown(void);
63701 struct pt_regs;
63702@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
63703 */
63704
63705 extern void kernel_restart_prepare(char *cmd);
63706-extern void kernel_restart(char *cmd);
63707-extern void kernel_halt(void);
63708-extern void kernel_power_off(void);
63709+extern void kernel_restart(char *cmd) __noreturn;
63710+extern void kernel_halt(void) __noreturn;
63711+extern void kernel_power_off(void) __noreturn;
63712
63713 extern int C_A_D; /* for sysctl */
63714 void ctrl_alt_del(void);
63715@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
63716 * Emergency restart, callable from an interrupt handler.
63717 */
63718
63719-extern void emergency_restart(void);
63720+extern void emergency_restart(void) __noreturn;
63721 #include <asm/emergency-restart.h>
63722
63723 #endif
63724diff --git a/include/linux/relay.h b/include/linux/relay.h
63725index 91cacc3..b55ff74 100644
63726--- a/include/linux/relay.h
63727+++ b/include/linux/relay.h
63728@@ -160,7 +160,7 @@ struct rchan_callbacks
63729 * The callback should return 0 if successful, negative if not.
63730 */
63731 int (*remove_buf_file)(struct dentry *dentry);
63732-};
63733+} __no_const;
63734
63735 /*
63736 * CONFIG_RELAY kernel API, kernel/relay.c
63737diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
63738index 6fdf027..ff72610 100644
63739--- a/include/linux/rfkill.h
63740+++ b/include/linux/rfkill.h
63741@@ -147,6 +147,7 @@ struct rfkill_ops {
63742 void (*query)(struct rfkill *rfkill, void *data);
63743 int (*set_block)(void *data, bool blocked);
63744 };
63745+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
63746
63747 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
63748 /**
63749diff --git a/include/linux/rio.h b/include/linux/rio.h
63750index a90ebad..fd87b5d 100644
63751--- a/include/linux/rio.h
63752+++ b/include/linux/rio.h
63753@@ -321,7 +321,7 @@ struct rio_ops {
63754 int mbox, void *buffer, size_t len);
63755 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
63756 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
63757-};
63758+} __no_const;
63759
63760 #define RIO_RESOURCE_MEM 0x00000100
63761 #define RIO_RESOURCE_DOORBELL 0x00000200
63762diff --git a/include/linux/rmap.h b/include/linux/rmap.h
63763index 3fce545..b4fed6e 100644
63764--- a/include/linux/rmap.h
63765+++ b/include/linux/rmap.h
63766@@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
63767 void anon_vma_init(void); /* create anon_vma_cachep */
63768 int anon_vma_prepare(struct vm_area_struct *);
63769 void unlink_anon_vmas(struct vm_area_struct *);
63770-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
63771+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
63772 void anon_vma_moveto_tail(struct vm_area_struct *);
63773-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
63774+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
63775
63776 static inline void anon_vma_merge(struct vm_area_struct *vma,
63777 struct vm_area_struct *next)
63778diff --git a/include/linux/sched.h b/include/linux/sched.h
63779index 4a1f493..5812aeb 100644
63780--- a/include/linux/sched.h
63781+++ b/include/linux/sched.h
63782@@ -101,6 +101,7 @@ struct bio_list;
63783 struct fs_struct;
63784 struct perf_event_context;
63785 struct blk_plug;
63786+struct linux_binprm;
63787
63788 /*
63789 * List of flags we want to share for kernel threads,
63790@@ -384,10 +385,13 @@ struct user_namespace;
63791 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
63792
63793 extern int sysctl_max_map_count;
63794+extern unsigned long sysctl_heap_stack_gap;
63795
63796 #include <linux/aio.h>
63797
63798 #ifdef CONFIG_MMU
63799+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
63800+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
63801 extern void arch_pick_mmap_layout(struct mm_struct *mm);
63802 extern unsigned long
63803 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
63804@@ -406,6 +410,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
63805 extern void set_dumpable(struct mm_struct *mm, int value);
63806 extern int get_dumpable(struct mm_struct *mm);
63807
63808+/* get/set_dumpable() values */
63809+#define SUID_DUMPABLE_DISABLED 0
63810+#define SUID_DUMPABLE_ENABLED 1
63811+#define SUID_DUMPABLE_SAFE 2
63812+
63813 /* mm flags */
63814 /* dumpable bits */
63815 #define MMF_DUMPABLE 0 /* core dump is permitted */
63816@@ -646,6 +655,17 @@ struct signal_struct {
63817 #ifdef CONFIG_TASKSTATS
63818 struct taskstats *stats;
63819 #endif
63820+
63821+#ifdef CONFIG_GRKERNSEC
63822+ u32 curr_ip;
63823+ u32 saved_ip;
63824+ u32 gr_saddr;
63825+ u32 gr_daddr;
63826+ u16 gr_sport;
63827+ u16 gr_dport;
63828+ u8 used_accept:1;
63829+#endif
63830+
63831 #ifdef CONFIG_AUDIT
63832 unsigned audit_tty;
63833 struct tty_audit_buf *tty_audit_buf;
63834@@ -729,6 +749,11 @@ struct user_struct {
63835 struct key *session_keyring; /* UID's default session keyring */
63836 #endif
63837
63838+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63839+ unsigned int banned;
63840+ unsigned long ban_expires;
63841+#endif
63842+
63843 /* Hash table maintenance information */
63844 struct hlist_node uidhash_node;
63845 kuid_t uid;
63846@@ -1348,8 +1373,8 @@ struct task_struct {
63847 struct list_head thread_group;
63848
63849 struct completion *vfork_done; /* for vfork() */
63850- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
63851- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63852+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
63853+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63854
63855 cputime_t utime, stime, utimescaled, stimescaled;
63856 cputime_t gtime;
63857@@ -1365,11 +1390,6 @@ struct task_struct {
63858 struct task_cputime cputime_expires;
63859 struct list_head cpu_timers[3];
63860
63861-/* process credentials */
63862- const struct cred __rcu *real_cred; /* objective and real subjective task
63863- * credentials (COW) */
63864- const struct cred __rcu *cred; /* effective (overridable) subjective task
63865- * credentials (COW) */
63866 char comm[TASK_COMM_LEN]; /* executable name excluding path
63867 - access with [gs]et_task_comm (which lock
63868 it with task_lock())
63869@@ -1386,8 +1406,16 @@ struct task_struct {
63870 #endif
63871 /* CPU-specific state of this task */
63872 struct thread_struct thread;
63873+/* thread_info moved to task_struct */
63874+#ifdef CONFIG_X86
63875+ struct thread_info tinfo;
63876+#endif
63877 /* filesystem information */
63878 struct fs_struct *fs;
63879+
63880+ const struct cred __rcu *cred; /* effective (overridable) subjective task
63881+ * credentials (COW) */
63882+
63883 /* open file information */
63884 struct files_struct *files;
63885 /* namespaces */
63886@@ -1431,6 +1459,11 @@ struct task_struct {
63887 struct rt_mutex_waiter *pi_blocked_on;
63888 #endif
63889
63890+/* process credentials */
63891+ const struct cred __rcu *real_cred; /* objective and real subjective task
63892+ * credentials (COW) */
63893+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63894+
63895 #ifdef CONFIG_DEBUG_MUTEXES
63896 /* mutex deadlock detection */
63897 struct mutex_waiter *blocked_on;
63898@@ -1547,6 +1580,27 @@ struct task_struct {
63899 unsigned long default_timer_slack_ns;
63900
63901 struct list_head *scm_work_list;
63902+
63903+#ifdef CONFIG_GRKERNSEC
63904+ /* grsecurity */
63905+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63906+ u64 exec_id;
63907+#endif
63908+#ifdef CONFIG_GRKERNSEC_SETXID
63909+ const struct cred *delayed_cred;
63910+#endif
63911+ struct dentry *gr_chroot_dentry;
63912+ struct acl_subject_label *acl;
63913+ struct acl_role_label *role;
63914+ struct file *exec_file;
63915+ u16 acl_role_id;
63916+ /* is this the task that authenticated to the special role */
63917+ u8 acl_sp_role;
63918+ u8 is_writable;
63919+ u8 brute;
63920+ u8 gr_is_chrooted;
63921+#endif
63922+
63923 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
63924 /* Index of current stored address in ret_stack */
63925 int curr_ret_stack;
63926@@ -1585,6 +1639,51 @@ struct task_struct {
63927 #endif
63928 };
63929
63930+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
63931+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
63932+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
63933+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
63934+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
63935+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
63936+
63937+#ifdef CONFIG_PAX_SOFTMODE
63938+extern int pax_softmode;
63939+#endif
63940+
63941+extern int pax_check_flags(unsigned long *);
63942+
63943+/* if tsk != current then task_lock must be held on it */
63944+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63945+static inline unsigned long pax_get_flags(struct task_struct *tsk)
63946+{
63947+ if (likely(tsk->mm))
63948+ return tsk->mm->pax_flags;
63949+ else
63950+ return 0UL;
63951+}
63952+
63953+/* if tsk != current then task_lock must be held on it */
63954+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
63955+{
63956+ if (likely(tsk->mm)) {
63957+ tsk->mm->pax_flags = flags;
63958+ return 0;
63959+ }
63960+ return -EINVAL;
63961+}
63962+#endif
63963+
63964+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63965+extern void pax_set_initial_flags(struct linux_binprm *bprm);
63966+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
63967+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
63968+#endif
63969+
63970+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
63971+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
63972+extern void pax_report_refcount_overflow(struct pt_regs *regs);
63973+extern void check_object_size(const void *ptr, unsigned long n, bool to);
63974+
63975 /* Future-safe accessor for struct task_struct's cpus_allowed. */
63976 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
63977
63978@@ -2112,7 +2211,9 @@ void yield(void);
63979 extern struct exec_domain default_exec_domain;
63980
63981 union thread_union {
63982+#ifndef CONFIG_X86
63983 struct thread_info thread_info;
63984+#endif
63985 unsigned long stack[THREAD_SIZE/sizeof(long)];
63986 };
63987
63988@@ -2145,6 +2246,7 @@ extern struct pid_namespace init_pid_ns;
63989 */
63990
63991 extern struct task_struct *find_task_by_vpid(pid_t nr);
63992+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
63993 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
63994 struct pid_namespace *ns);
63995
63996@@ -2301,7 +2403,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
63997 extern void exit_itimers(struct signal_struct *);
63998 extern void flush_itimer_signals(void);
63999
64000-extern void do_group_exit(int);
64001+extern __noreturn void do_group_exit(int);
64002
64003 extern void daemonize(const char *, ...);
64004 extern int allow_signal(int);
64005@@ -2502,9 +2604,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
64006
64007 #endif
64008
64009-static inline int object_is_on_stack(void *obj)
64010+static inline int object_starts_on_stack(void *obj)
64011 {
64012- void *stack = task_stack_page(current);
64013+ const void *stack = task_stack_page(current);
64014
64015 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
64016 }
64017diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
64018index 899fbb4..1cb4138 100644
64019--- a/include/linux/screen_info.h
64020+++ b/include/linux/screen_info.h
64021@@ -43,7 +43,8 @@ struct screen_info {
64022 __u16 pages; /* 0x32 */
64023 __u16 vesa_attributes; /* 0x34 */
64024 __u32 capabilities; /* 0x36 */
64025- __u8 _reserved[6]; /* 0x3a */
64026+ __u16 vesapm_size; /* 0x3a */
64027+ __u8 _reserved[4]; /* 0x3c */
64028 } __attribute__((packed));
64029
64030 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
64031diff --git a/include/linux/security.h b/include/linux/security.h
64032index 3dea6a9..81fd81f 100644
64033--- a/include/linux/security.h
64034+++ b/include/linux/security.h
64035@@ -26,6 +26,7 @@
64036 #include <linux/capability.h>
64037 #include <linux/slab.h>
64038 #include <linux/err.h>
64039+#include <linux/grsecurity.h>
64040
64041 struct linux_binprm;
64042 struct cred;
64043diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
64044index fc61854..d7c490b 100644
64045--- a/include/linux/seq_file.h
64046+++ b/include/linux/seq_file.h
64047@@ -25,6 +25,9 @@ struct seq_file {
64048 struct mutex lock;
64049 const struct seq_operations *op;
64050 int poll_event;
64051+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64052+ u64 exec_id;
64053+#endif
64054 void *private;
64055 };
64056
64057@@ -34,6 +37,7 @@ struct seq_operations {
64058 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
64059 int (*show) (struct seq_file *m, void *v);
64060 };
64061+typedef struct seq_operations __no_const seq_operations_no_const;
64062
64063 #define SEQ_SKIP 1
64064
64065diff --git a/include/linux/shm.h b/include/linux/shm.h
64066index 92808b8..c28cac4 100644
64067--- a/include/linux/shm.h
64068+++ b/include/linux/shm.h
64069@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
64070
64071 /* The task created the shm object. NULL if the task is dead. */
64072 struct task_struct *shm_creator;
64073+#ifdef CONFIG_GRKERNSEC
64074+ time_t shm_createtime;
64075+ pid_t shm_lapid;
64076+#endif
64077 };
64078
64079 /* shm_mode upper byte flags */
64080diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
64081index 642cb73..2efdb98 100644
64082--- a/include/linux/skbuff.h
64083+++ b/include/linux/skbuff.h
64084@@ -567,7 +567,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
64085 extern struct sk_buff *__alloc_skb(unsigned int size,
64086 gfp_t priority, int fclone, int node);
64087 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
64088-static inline struct sk_buff *alloc_skb(unsigned int size,
64089+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
64090 gfp_t priority)
64091 {
64092 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
64093@@ -680,7 +680,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
64094 */
64095 static inline int skb_queue_empty(const struct sk_buff_head *list)
64096 {
64097- return list->next == (struct sk_buff *)list;
64098+ return list->next == (const struct sk_buff *)list;
64099 }
64100
64101 /**
64102@@ -693,7 +693,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
64103 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
64104 const struct sk_buff *skb)
64105 {
64106- return skb->next == (struct sk_buff *)list;
64107+ return skb->next == (const struct sk_buff *)list;
64108 }
64109
64110 /**
64111@@ -706,7 +706,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
64112 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
64113 const struct sk_buff *skb)
64114 {
64115- return skb->prev == (struct sk_buff *)list;
64116+ return skb->prev == (const struct sk_buff *)list;
64117 }
64118
64119 /**
64120@@ -1605,7 +1605,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
64121 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
64122 */
64123 #ifndef NET_SKB_PAD
64124-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
64125+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
64126 #endif
64127
64128 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
64129@@ -2112,7 +2112,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
64130 int noblock, int *err);
64131 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
64132 struct poll_table_struct *wait);
64133-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
64134+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
64135 int offset, struct iovec *to,
64136 int size);
64137 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
64138diff --git a/include/linux/slab.h b/include/linux/slab.h
64139index 67d5d94..bbd740b 100644
64140--- a/include/linux/slab.h
64141+++ b/include/linux/slab.h
64142@@ -11,12 +11,20 @@
64143
64144 #include <linux/gfp.h>
64145 #include <linux/types.h>
64146+#include <linux/err.h>
64147
64148 /*
64149 * Flags to pass to kmem_cache_create().
64150 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
64151 */
64152 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
64153+
64154+#ifdef CONFIG_PAX_USERCOPY_SLABS
64155+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
64156+#else
64157+#define SLAB_USERCOPY 0x00000000UL
64158+#endif
64159+
64160 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
64161 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
64162 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
64163@@ -87,10 +95,13 @@
64164 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
64165 * Both make kfree a no-op.
64166 */
64167-#define ZERO_SIZE_PTR ((void *)16)
64168+#define ZERO_SIZE_PTR \
64169+({ \
64170+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
64171+ (void *)(-MAX_ERRNO-1L); \
64172+})
64173
64174-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
64175- (unsigned long)ZERO_SIZE_PTR)
64176+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
64177
64178 /*
64179 * struct kmem_cache related prototypes
64180@@ -161,6 +172,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
64181 void kfree(const void *);
64182 void kzfree(const void *);
64183 size_t ksize(const void *);
64184+const char *check_heap_object(const void *ptr, unsigned long n, bool to);
64185+bool is_usercopy_object(const void *ptr);
64186
64187 /*
64188 * Allocator specific definitions. These are mainly used to establish optimized
64189@@ -298,7 +311,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
64190 */
64191 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
64192 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
64193-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
64194+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
64195 #define kmalloc_track_caller(size, flags) \
64196 __kmalloc_track_caller(size, flags, _RET_IP_)
64197 #else
64198@@ -317,7 +330,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
64199 */
64200 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
64201 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
64202-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
64203+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
64204 #define kmalloc_node_track_caller(size, flags, node) \
64205 __kmalloc_node_track_caller(size, flags, node, \
64206 _RET_IP_)
64207diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
64208index fbd1117..0a3d314 100644
64209--- a/include/linux/slab_def.h
64210+++ b/include/linux/slab_def.h
64211@@ -66,10 +66,10 @@ struct kmem_cache {
64212 unsigned long node_allocs;
64213 unsigned long node_frees;
64214 unsigned long node_overflow;
64215- atomic_t allochit;
64216- atomic_t allocmiss;
64217- atomic_t freehit;
64218- atomic_t freemiss;
64219+ atomic_unchecked_t allochit;
64220+ atomic_unchecked_t allocmiss;
64221+ atomic_unchecked_t freehit;
64222+ atomic_unchecked_t freemiss;
64223
64224 /*
64225 * If debugging is enabled, then the allocator can add additional
64226@@ -103,11 +103,16 @@ struct cache_sizes {
64227 #ifdef CONFIG_ZONE_DMA
64228 struct kmem_cache *cs_dmacachep;
64229 #endif
64230+
64231+#ifdef CONFIG_PAX_USERCOPY_SLABS
64232+ struct kmem_cache *cs_usercopycachep;
64233+#endif
64234+
64235 };
64236 extern struct cache_sizes malloc_sizes[];
64237
64238 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
64239-void *__kmalloc(size_t size, gfp_t flags);
64240+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64241
64242 #ifdef CONFIG_TRACING
64243 extern void *kmem_cache_alloc_trace(size_t size,
64244@@ -150,6 +155,13 @@ found:
64245 cachep = malloc_sizes[i].cs_dmacachep;
64246 else
64247 #endif
64248+
64249+#ifdef CONFIG_PAX_USERCOPY_SLABS
64250+ if (flags & GFP_USERCOPY)
64251+ cachep = malloc_sizes[i].cs_usercopycachep;
64252+ else
64253+#endif
64254+
64255 cachep = malloc_sizes[i].cs_cachep;
64256
64257 ret = kmem_cache_alloc_trace(size, cachep, flags);
64258@@ -160,7 +172,7 @@ found:
64259 }
64260
64261 #ifdef CONFIG_NUMA
64262-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
64263+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64264 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
64265
64266 #ifdef CONFIG_TRACING
64267@@ -203,6 +215,13 @@ found:
64268 cachep = malloc_sizes[i].cs_dmacachep;
64269 else
64270 #endif
64271+
64272+#ifdef CONFIG_PAX_USERCOPY_SLABS
64273+ if (flags & GFP_USERCOPY)
64274+ cachep = malloc_sizes[i].cs_usercopycachep;
64275+ else
64276+#endif
64277+
64278 cachep = malloc_sizes[i].cs_cachep;
64279
64280 return kmem_cache_alloc_node_trace(size, cachep, flags, node);
64281diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
64282index 0ec00b3..22b4715 100644
64283--- a/include/linux/slob_def.h
64284+++ b/include/linux/slob_def.h
64285@@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
64286 return kmem_cache_alloc_node(cachep, flags, -1);
64287 }
64288
64289-void *__kmalloc_node(size_t size, gfp_t flags, int node);
64290+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64291
64292 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64293 {
64294@@ -29,7 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
64295 return __kmalloc_node(size, flags, -1);
64296 }
64297
64298-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
64299+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
64300 {
64301 return kmalloc(size, flags);
64302 }
64303diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
64304index c2f8c8b..d992a41 100644
64305--- a/include/linux/slub_def.h
64306+++ b/include/linux/slub_def.h
64307@@ -92,7 +92,7 @@ struct kmem_cache {
64308 struct kmem_cache_order_objects max;
64309 struct kmem_cache_order_objects min;
64310 gfp_t allocflags; /* gfp flags to use on each alloc */
64311- int refcount; /* Refcount for slab cache destroy */
64312+ atomic_t refcount; /* Refcount for slab cache destroy */
64313 void (*ctor)(void *);
64314 int inuse; /* Offset to metadata */
64315 int align; /* Alignment */
64316@@ -153,7 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
64317 * Sorry that the following has to be that ugly but some versions of GCC
64318 * have trouble with constant propagation and loops.
64319 */
64320-static __always_inline int kmalloc_index(size_t size)
64321+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
64322 {
64323 if (!size)
64324 return 0;
64325@@ -218,7 +218,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
64326 }
64327
64328 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
64329-void *__kmalloc(size_t size, gfp_t flags);
64330+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
64331
64332 static __always_inline void *
64333 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
64334@@ -259,7 +259,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
64335 }
64336 #endif
64337
64338-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
64339+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
64340 {
64341 unsigned int order = get_order(size);
64342 return kmalloc_order_trace(size, flags, order);
64343@@ -284,7 +284,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
64344 }
64345
64346 #ifdef CONFIG_NUMA
64347-void *__kmalloc_node(size_t size, gfp_t flags, int node);
64348+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64349 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
64350
64351 #ifdef CONFIG_TRACING
64352diff --git a/include/linux/sonet.h b/include/linux/sonet.h
64353index de8832d..0147b46 100644
64354--- a/include/linux/sonet.h
64355+++ b/include/linux/sonet.h
64356@@ -61,7 +61,7 @@ struct sonet_stats {
64357 #include <linux/atomic.h>
64358
64359 struct k_sonet_stats {
64360-#define __HANDLE_ITEM(i) atomic_t i
64361+#define __HANDLE_ITEM(i) atomic_unchecked_t i
64362 __SONET_ITEMS
64363 #undef __HANDLE_ITEM
64364 };
64365diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
64366index 523547e..2cb7140 100644
64367--- a/include/linux/sunrpc/clnt.h
64368+++ b/include/linux/sunrpc/clnt.h
64369@@ -174,9 +174,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
64370 {
64371 switch (sap->sa_family) {
64372 case AF_INET:
64373- return ntohs(((struct sockaddr_in *)sap)->sin_port);
64374+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
64375 case AF_INET6:
64376- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
64377+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
64378 }
64379 return 0;
64380 }
64381@@ -209,7 +209,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
64382 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
64383 const struct sockaddr *src)
64384 {
64385- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
64386+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
64387 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
64388
64389 dsin->sin_family = ssin->sin_family;
64390@@ -312,7 +312,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
64391 if (sa->sa_family != AF_INET6)
64392 return 0;
64393
64394- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
64395+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
64396 }
64397
64398 #endif /* __KERNEL__ */
64399diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
64400index dc0c3cc..8503fb6 100644
64401--- a/include/linux/sunrpc/sched.h
64402+++ b/include/linux/sunrpc/sched.h
64403@@ -106,6 +106,7 @@ struct rpc_call_ops {
64404 void (*rpc_count_stats)(struct rpc_task *, void *);
64405 void (*rpc_release)(void *);
64406 };
64407+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
64408
64409 struct rpc_task_setup {
64410 struct rpc_task *task;
64411diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
64412index 0b8e3e6..33e0a01 100644
64413--- a/include/linux/sunrpc/svc_rdma.h
64414+++ b/include/linux/sunrpc/svc_rdma.h
64415@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
64416 extern unsigned int svcrdma_max_requests;
64417 extern unsigned int svcrdma_max_req_size;
64418
64419-extern atomic_t rdma_stat_recv;
64420-extern atomic_t rdma_stat_read;
64421-extern atomic_t rdma_stat_write;
64422-extern atomic_t rdma_stat_sq_starve;
64423-extern atomic_t rdma_stat_rq_starve;
64424-extern atomic_t rdma_stat_rq_poll;
64425-extern atomic_t rdma_stat_rq_prod;
64426-extern atomic_t rdma_stat_sq_poll;
64427-extern atomic_t rdma_stat_sq_prod;
64428+extern atomic_unchecked_t rdma_stat_recv;
64429+extern atomic_unchecked_t rdma_stat_read;
64430+extern atomic_unchecked_t rdma_stat_write;
64431+extern atomic_unchecked_t rdma_stat_sq_starve;
64432+extern atomic_unchecked_t rdma_stat_rq_starve;
64433+extern atomic_unchecked_t rdma_stat_rq_poll;
64434+extern atomic_unchecked_t rdma_stat_rq_prod;
64435+extern atomic_unchecked_t rdma_stat_sq_poll;
64436+extern atomic_unchecked_t rdma_stat_sq_prod;
64437
64438 #define RPCRDMA_VERSION 1
64439
64440diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
64441index c34b4c8..a65b67d 100644
64442--- a/include/linux/sysctl.h
64443+++ b/include/linux/sysctl.h
64444@@ -155,7 +155,11 @@ enum
64445 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
64446 };
64447
64448-
64449+#ifdef CONFIG_PAX_SOFTMODE
64450+enum {
64451+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
64452+};
64453+#endif
64454
64455 /* CTL_VM names: */
64456 enum
64457@@ -948,6 +952,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
64458
64459 extern int proc_dostring(struct ctl_table *, int,
64460 void __user *, size_t *, loff_t *);
64461+extern int proc_dostring_modpriv(struct ctl_table *, int,
64462+ void __user *, size_t *, loff_t *);
64463 extern int proc_dointvec(struct ctl_table *, int,
64464 void __user *, size_t *, loff_t *);
64465 extern int proc_dointvec_minmax(struct ctl_table *, int,
64466diff --git a/include/linux/tty.h b/include/linux/tty.h
64467index 9f47ab5..73da944 100644
64468--- a/include/linux/tty.h
64469+++ b/include/linux/tty.h
64470@@ -225,7 +225,7 @@ struct tty_port {
64471 const struct tty_port_operations *ops; /* Port operations */
64472 spinlock_t lock; /* Lock protecting tty field */
64473 int blocked_open; /* Waiting to open */
64474- int count; /* Usage count */
64475+ atomic_t count; /* Usage count */
64476 wait_queue_head_t open_wait; /* Open waiters */
64477 wait_queue_head_t close_wait; /* Close waiters */
64478 wait_queue_head_t delta_msr_wait; /* Modem status change */
64479@@ -525,7 +525,7 @@ extern int tty_port_open(struct tty_port *port,
64480 struct tty_struct *tty, struct file *filp);
64481 static inline int tty_port_users(struct tty_port *port)
64482 {
64483- return port->count + port->blocked_open;
64484+ return atomic_read(&port->count) + port->blocked_open;
64485 }
64486
64487 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
64488diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
64489index fb79dd8d..07d4773 100644
64490--- a/include/linux/tty_ldisc.h
64491+++ b/include/linux/tty_ldisc.h
64492@@ -149,7 +149,7 @@ struct tty_ldisc_ops {
64493
64494 struct module *owner;
64495
64496- int refcount;
64497+ atomic_t refcount;
64498 };
64499
64500 struct tty_ldisc {
64501diff --git a/include/linux/types.h b/include/linux/types.h
64502index 9c1bd53..c2370f6 100644
64503--- a/include/linux/types.h
64504+++ b/include/linux/types.h
64505@@ -220,10 +220,26 @@ typedef struct {
64506 int counter;
64507 } atomic_t;
64508
64509+#ifdef CONFIG_PAX_REFCOUNT
64510+typedef struct {
64511+ int counter;
64512+} atomic_unchecked_t;
64513+#else
64514+typedef atomic_t atomic_unchecked_t;
64515+#endif
64516+
64517 #ifdef CONFIG_64BIT
64518 typedef struct {
64519 long counter;
64520 } atomic64_t;
64521+
64522+#ifdef CONFIG_PAX_REFCOUNT
64523+typedef struct {
64524+ long counter;
64525+} atomic64_unchecked_t;
64526+#else
64527+typedef atomic64_t atomic64_unchecked_t;
64528+#endif
64529 #endif
64530
64531 struct list_head {
64532diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
64533index 5ca0951..ab496a5 100644
64534--- a/include/linux/uaccess.h
64535+++ b/include/linux/uaccess.h
64536@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
64537 long ret; \
64538 mm_segment_t old_fs = get_fs(); \
64539 \
64540- set_fs(KERNEL_DS); \
64541 pagefault_disable(); \
64542- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
64543- pagefault_enable(); \
64544+ set_fs(KERNEL_DS); \
64545+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
64546 set_fs(old_fs); \
64547+ pagefault_enable(); \
64548 ret; \
64549 })
64550
64551diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
64552index 99c1b4d..bb94261 100644
64553--- a/include/linux/unaligned/access_ok.h
64554+++ b/include/linux/unaligned/access_ok.h
64555@@ -6,32 +6,32 @@
64556
64557 static inline u16 get_unaligned_le16(const void *p)
64558 {
64559- return le16_to_cpup((__le16 *)p);
64560+ return le16_to_cpup((const __le16 *)p);
64561 }
64562
64563 static inline u32 get_unaligned_le32(const void *p)
64564 {
64565- return le32_to_cpup((__le32 *)p);
64566+ return le32_to_cpup((const __le32 *)p);
64567 }
64568
64569 static inline u64 get_unaligned_le64(const void *p)
64570 {
64571- return le64_to_cpup((__le64 *)p);
64572+ return le64_to_cpup((const __le64 *)p);
64573 }
64574
64575 static inline u16 get_unaligned_be16(const void *p)
64576 {
64577- return be16_to_cpup((__be16 *)p);
64578+ return be16_to_cpup((const __be16 *)p);
64579 }
64580
64581 static inline u32 get_unaligned_be32(const void *p)
64582 {
64583- return be32_to_cpup((__be32 *)p);
64584+ return be32_to_cpup((const __be32 *)p);
64585 }
64586
64587 static inline u64 get_unaligned_be64(const void *p)
64588 {
64589- return be64_to_cpup((__be64 *)p);
64590+ return be64_to_cpup((const __be64 *)p);
64591 }
64592
64593 static inline void put_unaligned_le16(u16 val, void *p)
64594diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
64595index 547e59c..db6ad19 100644
64596--- a/include/linux/usb/renesas_usbhs.h
64597+++ b/include/linux/usb/renesas_usbhs.h
64598@@ -39,7 +39,7 @@ enum {
64599 */
64600 struct renesas_usbhs_driver_callback {
64601 int (*notify_hotplug)(struct platform_device *pdev);
64602-};
64603+} __no_const;
64604
64605 /*
64606 * callback functions for platform
64607@@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
64608 * VBUS control is needed for Host
64609 */
64610 int (*set_vbus)(struct platform_device *pdev, int enable);
64611-};
64612+} __no_const;
64613
64614 /*
64615 * parameters for renesas usbhs
64616diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
64617index 6f8fbcf..8259001 100644
64618--- a/include/linux/vermagic.h
64619+++ b/include/linux/vermagic.h
64620@@ -25,9 +25,35 @@
64621 #define MODULE_ARCH_VERMAGIC ""
64622 #endif
64623
64624+#ifdef CONFIG_PAX_REFCOUNT
64625+#define MODULE_PAX_REFCOUNT "REFCOUNT "
64626+#else
64627+#define MODULE_PAX_REFCOUNT ""
64628+#endif
64629+
64630+#ifdef CONSTIFY_PLUGIN
64631+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
64632+#else
64633+#define MODULE_CONSTIFY_PLUGIN ""
64634+#endif
64635+
64636+#ifdef STACKLEAK_PLUGIN
64637+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
64638+#else
64639+#define MODULE_STACKLEAK_PLUGIN ""
64640+#endif
64641+
64642+#ifdef CONFIG_GRKERNSEC
64643+#define MODULE_GRSEC "GRSEC "
64644+#else
64645+#define MODULE_GRSEC ""
64646+#endif
64647+
64648 #define VERMAGIC_STRING \
64649 UTS_RELEASE " " \
64650 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
64651 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
64652- MODULE_ARCH_VERMAGIC
64653+ MODULE_ARCH_VERMAGIC \
64654+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
64655+ MODULE_GRSEC
64656
64657diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
64658index dcdfc2b..ec79ab5 100644
64659--- a/include/linux/vmalloc.h
64660+++ b/include/linux/vmalloc.h
64661@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
64662 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
64663 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
64664 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
64665+
64666+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64667+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
64668+#endif
64669+
64670 /* bits [20..32] reserved for arch specific ioremap internals */
64671
64672 /*
64673@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
64674 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
64675 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
64676 unsigned long start, unsigned long end, gfp_t gfp_mask,
64677- pgprot_t prot, int node, void *caller);
64678+ pgprot_t prot, int node, void *caller) __size_overflow(1);
64679 extern void vfree(const void *addr);
64680
64681 extern void *vmap(struct page **pages, unsigned int count,
64682@@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
64683 extern void free_vm_area(struct vm_struct *area);
64684
64685 /* for /dev/kmem */
64686-extern long vread(char *buf, char *addr, unsigned long count);
64687-extern long vwrite(char *buf, char *addr, unsigned long count);
64688+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
64689+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
64690
64691 /*
64692 * Internals. Dont't use..
64693diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
64694index 65efb92..137adbb 100644
64695--- a/include/linux/vmstat.h
64696+++ b/include/linux/vmstat.h
64697@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
64698 /*
64699 * Zone based page accounting with per cpu differentials.
64700 */
64701-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64702+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64703
64704 static inline void zone_page_state_add(long x, struct zone *zone,
64705 enum zone_stat_item item)
64706 {
64707- atomic_long_add(x, &zone->vm_stat[item]);
64708- atomic_long_add(x, &vm_stat[item]);
64709+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
64710+ atomic_long_add_unchecked(x, &vm_stat[item]);
64711 }
64712
64713 static inline unsigned long global_page_state(enum zone_stat_item item)
64714 {
64715- long x = atomic_long_read(&vm_stat[item]);
64716+ long x = atomic_long_read_unchecked(&vm_stat[item]);
64717 #ifdef CONFIG_SMP
64718 if (x < 0)
64719 x = 0;
64720@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
64721 static inline unsigned long zone_page_state(struct zone *zone,
64722 enum zone_stat_item item)
64723 {
64724- long x = atomic_long_read(&zone->vm_stat[item]);
64725+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64726 #ifdef CONFIG_SMP
64727 if (x < 0)
64728 x = 0;
64729@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
64730 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
64731 enum zone_stat_item item)
64732 {
64733- long x = atomic_long_read(&zone->vm_stat[item]);
64734+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64735
64736 #ifdef CONFIG_SMP
64737 int cpu;
64738@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
64739
64740 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
64741 {
64742- atomic_long_inc(&zone->vm_stat[item]);
64743- atomic_long_inc(&vm_stat[item]);
64744+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
64745+ atomic_long_inc_unchecked(&vm_stat[item]);
64746 }
64747
64748 static inline void __inc_zone_page_state(struct page *page,
64749@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
64750
64751 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
64752 {
64753- atomic_long_dec(&zone->vm_stat[item]);
64754- atomic_long_dec(&vm_stat[item]);
64755+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
64756+ atomic_long_dec_unchecked(&vm_stat[item]);
64757 }
64758
64759 static inline void __dec_zone_page_state(struct page *page,
64760diff --git a/include/linux/xattr.h b/include/linux/xattr.h
64761index e5d1220..ef6e406 100644
64762--- a/include/linux/xattr.h
64763+++ b/include/linux/xattr.h
64764@@ -57,6 +57,11 @@
64765 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
64766 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
64767
64768+/* User namespace */
64769+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
64770+#define XATTR_PAX_FLAGS_SUFFIX "flags"
64771+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
64772+
64773 #ifdef __KERNEL__
64774
64775 #include <linux/types.h>
64776diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
64777index 944ecdf..a3994fc 100644
64778--- a/include/media/saa7146_vv.h
64779+++ b/include/media/saa7146_vv.h
64780@@ -161,8 +161,8 @@ struct saa7146_ext_vv
64781 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
64782
64783 /* the extension can override this */
64784- struct v4l2_ioctl_ops vid_ops;
64785- struct v4l2_ioctl_ops vbi_ops;
64786+ v4l2_ioctl_ops_no_const vid_ops;
64787+ v4l2_ioctl_ops_no_const vbi_ops;
64788 /* pointer to the saa7146 core ops */
64789 const struct v4l2_ioctl_ops *core_ops;
64790
64791diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
64792index a056e6e..31023a5 100644
64793--- a/include/media/v4l2-dev.h
64794+++ b/include/media/v4l2-dev.h
64795@@ -73,7 +73,8 @@ struct v4l2_file_operations {
64796 int (*mmap) (struct file *, struct vm_area_struct *);
64797 int (*open) (struct file *);
64798 int (*release) (struct file *);
64799-};
64800+} __do_const;
64801+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
64802
64803 /*
64804 * Newer version of video_device, handled by videodev2.c
64805diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
64806index d8b76f7..7d5aa18 100644
64807--- a/include/media/v4l2-ioctl.h
64808+++ b/include/media/v4l2-ioctl.h
64809@@ -287,7 +287,7 @@ struct v4l2_ioctl_ops {
64810 long (*vidioc_default) (struct file *file, void *fh,
64811 bool valid_prio, int cmd, void *arg);
64812 };
64813-
64814+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
64815
64816 /* v4l debugging and diagnostics */
64817
64818diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
64819index 439dadc..1c67e3f 100644
64820--- a/include/net/caif/caif_hsi.h
64821+++ b/include/net/caif/caif_hsi.h
64822@@ -98,7 +98,7 @@ struct cfhsi_drv {
64823 void (*rx_done_cb) (struct cfhsi_drv *drv);
64824 void (*wake_up_cb) (struct cfhsi_drv *drv);
64825 void (*wake_down_cb) (struct cfhsi_drv *drv);
64826-};
64827+} __no_const;
64828
64829 /* Structure implemented by HSI device. */
64830 struct cfhsi_dev {
64831diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
64832index 9e5425b..8136ffc 100644
64833--- a/include/net/caif/cfctrl.h
64834+++ b/include/net/caif/cfctrl.h
64835@@ -52,7 +52,7 @@ struct cfctrl_rsp {
64836 void (*radioset_rsp)(void);
64837 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
64838 struct cflayer *client_layer);
64839-};
64840+} __no_const;
64841
64842 /* Link Setup Parameters for CAIF-Links. */
64843 struct cfctrl_link_param {
64844@@ -101,8 +101,8 @@ struct cfctrl_request_info {
64845 struct cfctrl {
64846 struct cfsrvl serv;
64847 struct cfctrl_rsp res;
64848- atomic_t req_seq_no;
64849- atomic_t rsp_seq_no;
64850+ atomic_unchecked_t req_seq_no;
64851+ atomic_unchecked_t rsp_seq_no;
64852 struct list_head list;
64853 /* Protects from simultaneous access to first_req list */
64854 spinlock_t info_list_lock;
64855diff --git a/include/net/flow.h b/include/net/flow.h
64856index 6c469db..7743b8e 100644
64857--- a/include/net/flow.h
64858+++ b/include/net/flow.h
64859@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
64860
64861 extern void flow_cache_flush(void);
64862 extern void flow_cache_flush_deferred(void);
64863-extern atomic_t flow_cache_genid;
64864+extern atomic_unchecked_t flow_cache_genid;
64865
64866 #endif
64867diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
64868index 2040bff..f4c0733 100644
64869--- a/include/net/inetpeer.h
64870+++ b/include/net/inetpeer.h
64871@@ -51,8 +51,8 @@ struct inet_peer {
64872 */
64873 union {
64874 struct {
64875- atomic_t rid; /* Frag reception counter */
64876- atomic_t ip_id_count; /* IP ID for the next packet */
64877+ atomic_unchecked_t rid; /* Frag reception counter */
64878+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
64879 __u32 tcp_ts;
64880 __u32 tcp_ts_stamp;
64881 };
64882@@ -118,11 +118,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
64883 more++;
64884 inet_peer_refcheck(p);
64885 do {
64886- old = atomic_read(&p->ip_id_count);
64887+ old = atomic_read_unchecked(&p->ip_id_count);
64888 new = old + more;
64889 if (!new)
64890 new = 1;
64891- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
64892+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
64893 return new;
64894 }
64895
64896diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
64897index 78df0866..00e5c9b 100644
64898--- a/include/net/ip_fib.h
64899+++ b/include/net/ip_fib.h
64900@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
64901
64902 #define FIB_RES_SADDR(net, res) \
64903 ((FIB_RES_NH(res).nh_saddr_genid == \
64904- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
64905+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
64906 FIB_RES_NH(res).nh_saddr : \
64907 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
64908 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
64909diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
64910index 95374d1..2300e36 100644
64911--- a/include/net/ip_vs.h
64912+++ b/include/net/ip_vs.h
64913@@ -510,7 +510,7 @@ struct ip_vs_conn {
64914 struct ip_vs_conn *control; /* Master control connection */
64915 atomic_t n_control; /* Number of controlled ones */
64916 struct ip_vs_dest *dest; /* real server */
64917- atomic_t in_pkts; /* incoming packet counter */
64918+ atomic_unchecked_t in_pkts; /* incoming packet counter */
64919
64920 /* packet transmitter for different forwarding methods. If it
64921 mangles the packet, it must return NF_DROP or better NF_STOLEN,
64922@@ -648,7 +648,7 @@ struct ip_vs_dest {
64923 __be16 port; /* port number of the server */
64924 union nf_inet_addr addr; /* IP address of the server */
64925 volatile unsigned int flags; /* dest status flags */
64926- atomic_t conn_flags; /* flags to copy to conn */
64927+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
64928 atomic_t weight; /* server weight */
64929
64930 atomic_t refcnt; /* reference counter */
64931diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
64932index 69b610a..fe3962c 100644
64933--- a/include/net/irda/ircomm_core.h
64934+++ b/include/net/irda/ircomm_core.h
64935@@ -51,7 +51,7 @@ typedef struct {
64936 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
64937 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
64938 struct ircomm_info *);
64939-} call_t;
64940+} __no_const call_t;
64941
64942 struct ircomm_cb {
64943 irda_queue_t queue;
64944diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
64945index 59ba38bc..d515662 100644
64946--- a/include/net/irda/ircomm_tty.h
64947+++ b/include/net/irda/ircomm_tty.h
64948@@ -35,6 +35,7 @@
64949 #include <linux/termios.h>
64950 #include <linux/timer.h>
64951 #include <linux/tty.h> /* struct tty_struct */
64952+#include <asm/local.h>
64953
64954 #include <net/irda/irias_object.h>
64955 #include <net/irda/ircomm_core.h>
64956@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
64957 unsigned short close_delay;
64958 unsigned short closing_wait; /* time to wait before closing */
64959
64960- int open_count;
64961- int blocked_open; /* # of blocked opens */
64962+ local_t open_count;
64963+ local_t blocked_open; /* # of blocked opens */
64964
64965 /* Protect concurent access to :
64966 * o self->open_count
64967diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
64968index cc7c197..9f2da2a 100644
64969--- a/include/net/iucv/af_iucv.h
64970+++ b/include/net/iucv/af_iucv.h
64971@@ -141,7 +141,7 @@ struct iucv_sock {
64972 struct iucv_sock_list {
64973 struct hlist_head head;
64974 rwlock_t lock;
64975- atomic_t autobind_name;
64976+ atomic_unchecked_t autobind_name;
64977 };
64978
64979 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
64980diff --git a/include/net/neighbour.h b/include/net/neighbour.h
64981index 6cdfeed..55a0256 100644
64982--- a/include/net/neighbour.h
64983+++ b/include/net/neighbour.h
64984@@ -123,7 +123,7 @@ struct neigh_ops {
64985 void (*error_report)(struct neighbour *, struct sk_buff *);
64986 int (*output)(struct neighbour *, struct sk_buff *);
64987 int (*connected_output)(struct neighbour *, struct sk_buff *);
64988-};
64989+} __do_const;
64990
64991 struct pneigh_entry {
64992 struct pneigh_entry *next;
64993diff --git a/include/net/netdma.h b/include/net/netdma.h
64994index 8ba8ce2..99b7fff 100644
64995--- a/include/net/netdma.h
64996+++ b/include/net/netdma.h
64997@@ -24,7 +24,7 @@
64998 #include <linux/dmaengine.h>
64999 #include <linux/skbuff.h>
65000
65001-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
65002+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
65003 struct sk_buff *skb, int offset, struct iovec *to,
65004 size_t len, struct dma_pinned_list *pinned_list);
65005
65006diff --git a/include/net/netlink.h b/include/net/netlink.h
65007index 785f37a..c81dc0c 100644
65008--- a/include/net/netlink.h
65009+++ b/include/net/netlink.h
65010@@ -520,7 +520,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
65011 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
65012 {
65013 if (mark)
65014- skb_trim(skb, (unsigned char *) mark - skb->data);
65015+ skb_trim(skb, (const unsigned char *) mark - skb->data);
65016 }
65017
65018 /**
65019diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
65020index bbd023a..97c6d0d 100644
65021--- a/include/net/netns/ipv4.h
65022+++ b/include/net/netns/ipv4.h
65023@@ -57,8 +57,8 @@ struct netns_ipv4 {
65024 unsigned int sysctl_ping_group_range[2];
65025 long sysctl_tcp_mem[3];
65026
65027- atomic_t rt_genid;
65028- atomic_t dev_addr_genid;
65029+ atomic_unchecked_t rt_genid;
65030+ atomic_unchecked_t dev_addr_genid;
65031
65032 #ifdef CONFIG_IP_MROUTE
65033 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
65034diff --git a/include/net/scm.h b/include/net/scm.h
65035index d456f4c..0c0017c 100644
65036--- a/include/net/scm.h
65037+++ b/include/net/scm.h
65038@@ -71,9 +71,11 @@ static __inline__ void scm_destroy(struct scm_cookie *scm)
65039 }
65040
65041 static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
65042- struct scm_cookie *scm)
65043+ struct scm_cookie *scm, bool forcecreds)
65044 {
65045 memset(scm, 0, sizeof(*scm));
65046+ if (forcecreds)
65047+ scm_set_cred(scm, task_tgid(current), current_cred());
65048 unix_get_peersec_dgram(sock, scm);
65049 if (msg->msg_controllen <= 0)
65050 return 0;
65051diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
65052index a2ef814..31a8e3f 100644
65053--- a/include/net/sctp/sctp.h
65054+++ b/include/net/sctp/sctp.h
65055@@ -318,9 +318,9 @@ do { \
65056
65057 #else /* SCTP_DEBUG */
65058
65059-#define SCTP_DEBUG_PRINTK(whatever...)
65060-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
65061-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
65062+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
65063+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
65064+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
65065 #define SCTP_ENABLE_DEBUG
65066 #define SCTP_DISABLE_DEBUG
65067 #define SCTP_ASSERT(expr, str, func)
65068diff --git a/include/net/sock.h b/include/net/sock.h
65069index 4a45216..2ae7cd8 100644
65070--- a/include/net/sock.h
65071+++ b/include/net/sock.h
65072@@ -303,7 +303,7 @@ struct sock {
65073 #ifdef CONFIG_RPS
65074 __u32 sk_rxhash;
65075 #endif
65076- atomic_t sk_drops;
65077+ atomic_unchecked_t sk_drops;
65078 int sk_rcvbuf;
65079
65080 struct sk_filter __rcu *sk_filter;
65081@@ -1726,7 +1726,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
65082 }
65083
65084 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
65085- char __user *from, char *to,
65086+ char __user *from, unsigned char *to,
65087 int copy, int offset)
65088 {
65089 if (skb->ip_summed == CHECKSUM_NONE) {
65090@@ -1985,7 +1985,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
65091 }
65092 }
65093
65094-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
65095+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
65096
65097 static inline struct page *sk_stream_alloc_page(struct sock *sk)
65098 {
65099diff --git a/include/net/tcp.h b/include/net/tcp.h
65100index e79aa48..05e52de 100644
65101--- a/include/net/tcp.h
65102+++ b/include/net/tcp.h
65103@@ -476,7 +476,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
65104 extern void tcp_xmit_retransmit_queue(struct sock *);
65105 extern void tcp_simple_retransmit(struct sock *);
65106 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
65107-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
65108+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
65109
65110 extern void tcp_send_probe0(struct sock *);
65111 extern void tcp_send_partial(struct sock *);
65112@@ -643,8 +643,8 @@ struct tcp_skb_cb {
65113 struct inet6_skb_parm h6;
65114 #endif
65115 } header; /* For incoming frames */
65116- __u32 seq; /* Starting sequence number */
65117- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
65118+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
65119+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
65120 __u32 when; /* used to compute rtt's */
65121 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
65122
65123@@ -658,7 +658,7 @@ struct tcp_skb_cb {
65124
65125 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
65126 /* 1 byte hole */
65127- __u32 ack_seq; /* Sequence number ACK'd */
65128+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
65129 };
65130
65131 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
65132@@ -1459,7 +1459,7 @@ struct tcp_seq_afinfo {
65133 char *name;
65134 sa_family_t family;
65135 const struct file_operations *seq_fops;
65136- struct seq_operations seq_ops;
65137+ seq_operations_no_const seq_ops;
65138 };
65139
65140 struct tcp_iter_state {
65141diff --git a/include/net/udp.h b/include/net/udp.h
65142index 065f379..b661b40 100644
65143--- a/include/net/udp.h
65144+++ b/include/net/udp.h
65145@@ -244,7 +244,7 @@ struct udp_seq_afinfo {
65146 sa_family_t family;
65147 struct udp_table *udp_table;
65148 const struct file_operations *seq_fops;
65149- struct seq_operations seq_ops;
65150+ seq_operations_no_const seq_ops;
65151 };
65152
65153 struct udp_iter_state {
65154diff --git a/include/net/xfrm.h b/include/net/xfrm.h
65155index e0a55df..5890bca07 100644
65156--- a/include/net/xfrm.h
65157+++ b/include/net/xfrm.h
65158@@ -505,7 +505,7 @@ struct xfrm_policy {
65159 struct timer_list timer;
65160
65161 struct flow_cache_object flo;
65162- atomic_t genid;
65163+ atomic_unchecked_t genid;
65164 u32 priority;
65165 u32 index;
65166 struct xfrm_mark mark;
65167diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
65168index 1a046b1..ee0bef0 100644
65169--- a/include/rdma/iw_cm.h
65170+++ b/include/rdma/iw_cm.h
65171@@ -122,7 +122,7 @@ struct iw_cm_verbs {
65172 int backlog);
65173
65174 int (*destroy_listen)(struct iw_cm_id *cm_id);
65175-};
65176+} __no_const;
65177
65178 /**
65179 * iw_create_cm_id - Create an IW CM identifier.
65180diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
65181index 8f9dfba..610ab6c 100644
65182--- a/include/scsi/libfc.h
65183+++ b/include/scsi/libfc.h
65184@@ -756,6 +756,7 @@ struct libfc_function_template {
65185 */
65186 void (*disc_stop_final) (struct fc_lport *);
65187 };
65188+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
65189
65190 /**
65191 * struct fc_disc - Discovery context
65192@@ -861,7 +862,7 @@ struct fc_lport {
65193 struct fc_vport *vport;
65194
65195 /* Operational Information */
65196- struct libfc_function_template tt;
65197+ libfc_function_template_no_const tt;
65198 u8 link_up;
65199 u8 qfull;
65200 enum fc_lport_state state;
65201diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
65202index ba96988..ecf2eb9 100644
65203--- a/include/scsi/scsi_device.h
65204+++ b/include/scsi/scsi_device.h
65205@@ -163,9 +163,9 @@ struct scsi_device {
65206 unsigned int max_device_blocked; /* what device_blocked counts down from */
65207 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
65208
65209- atomic_t iorequest_cnt;
65210- atomic_t iodone_cnt;
65211- atomic_t ioerr_cnt;
65212+ atomic_unchecked_t iorequest_cnt;
65213+ atomic_unchecked_t iodone_cnt;
65214+ atomic_unchecked_t ioerr_cnt;
65215
65216 struct device sdev_gendev,
65217 sdev_dev;
65218diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
65219index 719faf1..07b6728 100644
65220--- a/include/scsi/scsi_transport_fc.h
65221+++ b/include/scsi/scsi_transport_fc.h
65222@@ -739,7 +739,8 @@ struct fc_function_template {
65223 unsigned long show_host_system_hostname:1;
65224
65225 unsigned long disable_target_scan:1;
65226-};
65227+} __do_const;
65228+typedef struct fc_function_template __no_const fc_function_template_no_const;
65229
65230
65231 /**
65232diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
65233index 030b87c..98a6954 100644
65234--- a/include/sound/ak4xxx-adda.h
65235+++ b/include/sound/ak4xxx-adda.h
65236@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
65237 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
65238 unsigned char val);
65239 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
65240-};
65241+} __no_const;
65242
65243 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
65244
65245diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
65246index 8c05e47..2b5df97 100644
65247--- a/include/sound/hwdep.h
65248+++ b/include/sound/hwdep.h
65249@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
65250 struct snd_hwdep_dsp_status *status);
65251 int (*dsp_load)(struct snd_hwdep *hw,
65252 struct snd_hwdep_dsp_image *image);
65253-};
65254+} __no_const;
65255
65256 struct snd_hwdep {
65257 struct snd_card *card;
65258diff --git a/include/sound/info.h b/include/sound/info.h
65259index 9ca1a49..aba1728 100644
65260--- a/include/sound/info.h
65261+++ b/include/sound/info.h
65262@@ -44,7 +44,7 @@ struct snd_info_entry_text {
65263 struct snd_info_buffer *buffer);
65264 void (*write)(struct snd_info_entry *entry,
65265 struct snd_info_buffer *buffer);
65266-};
65267+} __no_const;
65268
65269 struct snd_info_entry_ops {
65270 int (*open)(struct snd_info_entry *entry,
65271diff --git a/include/sound/pcm.h b/include/sound/pcm.h
65272index 0d11128..814178e 100644
65273--- a/include/sound/pcm.h
65274+++ b/include/sound/pcm.h
65275@@ -81,6 +81,7 @@ struct snd_pcm_ops {
65276 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
65277 int (*ack)(struct snd_pcm_substream *substream);
65278 };
65279+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
65280
65281 /*
65282 *
65283diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
65284index af1b49e..a5d55a5 100644
65285--- a/include/sound/sb16_csp.h
65286+++ b/include/sound/sb16_csp.h
65287@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
65288 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
65289 int (*csp_stop) (struct snd_sb_csp * p);
65290 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
65291-};
65292+} __no_const;
65293
65294 /*
65295 * CSP private data
65296diff --git a/include/sound/soc.h b/include/sound/soc.h
65297index c703871..f7fbbbd 100644
65298--- a/include/sound/soc.h
65299+++ b/include/sound/soc.h
65300@@ -757,7 +757,7 @@ struct snd_soc_platform_driver {
65301 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
65302 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
65303 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
65304-};
65305+} __do_const;
65306
65307 struct snd_soc_platform {
65308 const char *name;
65309@@ -949,7 +949,7 @@ struct snd_soc_pcm_runtime {
65310 struct snd_soc_dai_link *dai_link;
65311 struct mutex pcm_mutex;
65312 enum snd_soc_pcm_subclass pcm_subclass;
65313- struct snd_pcm_ops ops;
65314+ snd_pcm_ops_no_const ops;
65315
65316 unsigned int dev_registered:1;
65317
65318diff --git a/include/sound/tea575x-tuner.h b/include/sound/tea575x-tuner.h
65319index 0c3c2fb..d9d9990 100644
65320--- a/include/sound/tea575x-tuner.h
65321+++ b/include/sound/tea575x-tuner.h
65322@@ -44,7 +44,7 @@ struct snd_tea575x_ops {
65323
65324 struct snd_tea575x {
65325 struct v4l2_device *v4l2_dev;
65326- struct v4l2_file_operations fops;
65327+ v4l2_file_operations_no_const fops;
65328 struct video_device vd; /* video device */
65329 int radio_nr; /* radio_nr */
65330 bool tea5759; /* 5759 chip is present */
65331diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
65332index 4119966..1a4671c 100644
65333--- a/include/sound/ymfpci.h
65334+++ b/include/sound/ymfpci.h
65335@@ -358,7 +358,7 @@ struct snd_ymfpci {
65336 spinlock_t reg_lock;
65337 spinlock_t voice_lock;
65338 wait_queue_head_t interrupt_sleep;
65339- atomic_t interrupt_sleep_count;
65340+ atomic_unchecked_t interrupt_sleep_count;
65341 struct snd_info_entry *proc_entry;
65342 const struct firmware *dsp_microcode;
65343 const struct firmware *controller_microcode;
65344diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
65345index 362e0d9..36b9a83 100644
65346--- a/include/target/target_core_base.h
65347+++ b/include/target/target_core_base.h
65348@@ -441,7 +441,7 @@ struct t10_reservation_ops {
65349 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
65350 int (*t10_pr_register)(struct se_cmd *);
65351 int (*t10_pr_clear)(struct se_cmd *);
65352-};
65353+} __no_const;
65354
65355 struct t10_reservation {
65356 /* Reservation effects all target ports */
65357@@ -780,7 +780,7 @@ struct se_device {
65358 spinlock_t stats_lock;
65359 /* Active commands on this virtual SE device */
65360 atomic_t simple_cmds;
65361- atomic_t dev_ordered_id;
65362+ atomic_unchecked_t dev_ordered_id;
65363 atomic_t execute_tasks;
65364 atomic_t dev_ordered_sync;
65365 atomic_t dev_qf_count;
65366diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
65367new file mode 100644
65368index 0000000..2efe49d
65369--- /dev/null
65370+++ b/include/trace/events/fs.h
65371@@ -0,0 +1,53 @@
65372+#undef TRACE_SYSTEM
65373+#define TRACE_SYSTEM fs
65374+
65375+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
65376+#define _TRACE_FS_H
65377+
65378+#include <linux/fs.h>
65379+#include <linux/tracepoint.h>
65380+
65381+TRACE_EVENT(do_sys_open,
65382+
65383+ TP_PROTO(char *filename, int flags, int mode),
65384+
65385+ TP_ARGS(filename, flags, mode),
65386+
65387+ TP_STRUCT__entry(
65388+ __string( filename, filename )
65389+ __field( int, flags )
65390+ __field( int, mode )
65391+ ),
65392+
65393+ TP_fast_assign(
65394+ __assign_str(filename, filename);
65395+ __entry->flags = flags;
65396+ __entry->mode = mode;
65397+ ),
65398+
65399+ TP_printk("\"%s\" %x %o",
65400+ __get_str(filename), __entry->flags, __entry->mode)
65401+);
65402+
65403+TRACE_EVENT(open_exec,
65404+
65405+ TP_PROTO(const char *filename),
65406+
65407+ TP_ARGS(filename),
65408+
65409+ TP_STRUCT__entry(
65410+ __string( filename, filename )
65411+ ),
65412+
65413+ TP_fast_assign(
65414+ __assign_str(filename, filename);
65415+ ),
65416+
65417+ TP_printk("\"%s\"",
65418+ __get_str(filename))
65419+);
65420+
65421+#endif /* _TRACE_FS_H */
65422+
65423+/* This part must be outside protection */
65424+#include <trace/define_trace.h>
65425diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
65426index 1c09820..7f5ec79 100644
65427--- a/include/trace/events/irq.h
65428+++ b/include/trace/events/irq.h
65429@@ -36,7 +36,7 @@ struct softirq_action;
65430 */
65431 TRACE_EVENT(irq_handler_entry,
65432
65433- TP_PROTO(int irq, struct irqaction *action),
65434+ TP_PROTO(int irq, const struct irqaction *action),
65435
65436 TP_ARGS(irq, action),
65437
65438@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
65439 */
65440 TRACE_EVENT(irq_handler_exit,
65441
65442- TP_PROTO(int irq, struct irqaction *action, int ret),
65443+ TP_PROTO(int irq, const struct irqaction *action, int ret),
65444
65445 TP_ARGS(irq, action, ret),
65446
65447diff --git a/include/video/udlfb.h b/include/video/udlfb.h
65448index f9466fa..f4e2b81 100644
65449--- a/include/video/udlfb.h
65450+++ b/include/video/udlfb.h
65451@@ -53,10 +53,10 @@ struct dlfb_data {
65452 u32 pseudo_palette[256];
65453 int blank_mode; /*one of FB_BLANK_ */
65454 /* blit-only rendering path metrics, exposed through sysfs */
65455- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
65456- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
65457- atomic_t bytes_sent; /* to usb, after compression including overhead */
65458- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
65459+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
65460+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
65461+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
65462+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
65463 };
65464
65465 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
65466diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
65467index 0993a22..32ba2fe 100644
65468--- a/include/video/uvesafb.h
65469+++ b/include/video/uvesafb.h
65470@@ -177,6 +177,7 @@ struct uvesafb_par {
65471 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
65472 u8 pmi_setpal; /* PMI for palette changes */
65473 u16 *pmi_base; /* protected mode interface location */
65474+ u8 *pmi_code; /* protected mode code location */
65475 void *pmi_start;
65476 void *pmi_pal;
65477 u8 *vbe_state_orig; /*
65478diff --git a/init/Kconfig b/init/Kconfig
65479index d07dcf9..fa47d0e 100644
65480--- a/init/Kconfig
65481+++ b/init/Kconfig
65482@@ -835,6 +835,7 @@ endif # CGROUPS
65483
65484 config CHECKPOINT_RESTORE
65485 bool "Checkpoint/restore support" if EXPERT
65486+ depends on !GRKERNSEC
65487 default n
65488 help
65489 Enables additional kernel features in a sake of checkpoint/restore.
65490@@ -1014,6 +1015,7 @@ config UIDGID_CONVERTED
65491 # Security modules
65492 depends on SECURITY_TOMOYO = n
65493 depends on SECURITY_APPARMOR = n
65494+ depends on GRKERNSEC = n
65495
65496 config UIDGID_STRICT_TYPE_CHECKS
65497 bool "Require conversions between uid/gids and their internal representation"
65498@@ -1401,7 +1403,7 @@ config SLUB_DEBUG
65499
65500 config COMPAT_BRK
65501 bool "Disable heap randomization"
65502- default y
65503+ default n
65504 help
65505 Randomizing heap placement makes heap exploits harder, but it
65506 also breaks ancient binaries (including anything libc5 based).
65507@@ -1584,7 +1586,7 @@ config INIT_ALL_POSSIBLE
65508 config STOP_MACHINE
65509 bool
65510 default y
65511- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
65512+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
65513 help
65514 Need stop_machine() primitive.
65515
65516diff --git a/init/Makefile b/init/Makefile
65517index 7bc47ee..6da2dc7 100644
65518--- a/init/Makefile
65519+++ b/init/Makefile
65520@@ -2,6 +2,9 @@
65521 # Makefile for the linux kernel.
65522 #
65523
65524+ccflags-y := $(GCC_PLUGINS_CFLAGS)
65525+asflags-y := $(GCC_PLUGINS_AFLAGS)
65526+
65527 obj-y := main.o version.o mounts.o
65528 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
65529 obj-y += noinitramfs.o
65530diff --git a/init/do_mounts.c b/init/do_mounts.c
65531index d3f0aee..c9322f5 100644
65532--- a/init/do_mounts.c
65533+++ b/init/do_mounts.c
65534@@ -336,11 +336,11 @@ static void __init get_fs_names(char *page)
65535 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
65536 {
65537 struct super_block *s;
65538- int err = sys_mount(name, "/root", fs, flags, data);
65539+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
65540 if (err)
65541 return err;
65542
65543- sys_chdir("/root");
65544+ sys_chdir((const char __force_user *)"/root");
65545 s = current->fs->pwd.dentry->d_sb;
65546 ROOT_DEV = s->s_dev;
65547 printk(KERN_INFO
65548@@ -460,18 +460,18 @@ void __init change_floppy(char *fmt, ...)
65549 va_start(args, fmt);
65550 vsprintf(buf, fmt, args);
65551 va_end(args);
65552- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
65553+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
65554 if (fd >= 0) {
65555 sys_ioctl(fd, FDEJECT, 0);
65556 sys_close(fd);
65557 }
65558 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
65559- fd = sys_open("/dev/console", O_RDWR, 0);
65560+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
65561 if (fd >= 0) {
65562 sys_ioctl(fd, TCGETS, (long)&termios);
65563 termios.c_lflag &= ~ICANON;
65564 sys_ioctl(fd, TCSETSF, (long)&termios);
65565- sys_read(fd, &c, 1);
65566+ sys_read(fd, (char __user *)&c, 1);
65567 termios.c_lflag |= ICANON;
65568 sys_ioctl(fd, TCSETSF, (long)&termios);
65569 sys_close(fd);
65570@@ -565,6 +565,6 @@ void __init prepare_namespace(void)
65571 mount_root();
65572 out:
65573 devtmpfs_mount("dev");
65574- sys_mount(".", "/", NULL, MS_MOVE, NULL);
65575- sys_chroot(".");
65576+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65577+ sys_chroot((const char __force_user *)".");
65578 }
65579diff --git a/init/do_mounts.h b/init/do_mounts.h
65580index f5b978a..69dbfe8 100644
65581--- a/init/do_mounts.h
65582+++ b/init/do_mounts.h
65583@@ -15,15 +15,15 @@ extern int root_mountflags;
65584
65585 static inline int create_dev(char *name, dev_t dev)
65586 {
65587- sys_unlink(name);
65588- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
65589+ sys_unlink((char __force_user *)name);
65590+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
65591 }
65592
65593 #if BITS_PER_LONG == 32
65594 static inline u32 bstat(char *name)
65595 {
65596 struct stat64 stat;
65597- if (sys_stat64(name, &stat) != 0)
65598+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
65599 return 0;
65600 if (!S_ISBLK(stat.st_mode))
65601 return 0;
65602@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
65603 static inline u32 bstat(char *name)
65604 {
65605 struct stat stat;
65606- if (sys_newstat(name, &stat) != 0)
65607+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
65608 return 0;
65609 if (!S_ISBLK(stat.st_mode))
65610 return 0;
65611diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
65612index 135959a2..28a3f43 100644
65613--- a/init/do_mounts_initrd.c
65614+++ b/init/do_mounts_initrd.c
65615@@ -53,13 +53,13 @@ static void __init handle_initrd(void)
65616 create_dev("/dev/root.old", Root_RAM0);
65617 /* mount initrd on rootfs' /root */
65618 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
65619- sys_mkdir("/old", 0700);
65620- root_fd = sys_open("/", 0, 0);
65621- old_fd = sys_open("/old", 0, 0);
65622+ sys_mkdir((const char __force_user *)"/old", 0700);
65623+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
65624+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
65625 /* move initrd over / and chdir/chroot in initrd root */
65626- sys_chdir("/root");
65627- sys_mount(".", "/", NULL, MS_MOVE, NULL);
65628- sys_chroot(".");
65629+ sys_chdir((const char __force_user *)"/root");
65630+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65631+ sys_chroot((const char __force_user *)".");
65632
65633 /*
65634 * In case that a resume from disk is carried out by linuxrc or one of
65635@@ -76,15 +76,15 @@ static void __init handle_initrd(void)
65636
65637 /* move initrd to rootfs' /old */
65638 sys_fchdir(old_fd);
65639- sys_mount("/", ".", NULL, MS_MOVE, NULL);
65640+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
65641 /* switch root and cwd back to / of rootfs */
65642 sys_fchdir(root_fd);
65643- sys_chroot(".");
65644+ sys_chroot((const char __force_user *)".");
65645 sys_close(old_fd);
65646 sys_close(root_fd);
65647
65648 if (new_decode_dev(real_root_dev) == Root_RAM0) {
65649- sys_chdir("/old");
65650+ sys_chdir((const char __force_user *)"/old");
65651 return;
65652 }
65653
65654@@ -92,17 +92,17 @@ static void __init handle_initrd(void)
65655 mount_root();
65656
65657 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
65658- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
65659+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
65660 if (!error)
65661 printk("okay\n");
65662 else {
65663- int fd = sys_open("/dev/root.old", O_RDWR, 0);
65664+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
65665 if (error == -ENOENT)
65666 printk("/initrd does not exist. Ignored.\n");
65667 else
65668 printk("failed\n");
65669 printk(KERN_NOTICE "Unmounting old root\n");
65670- sys_umount("/old", MNT_DETACH);
65671+ sys_umount((char __force_user *)"/old", MNT_DETACH);
65672 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
65673 if (fd < 0) {
65674 error = fd;
65675@@ -125,11 +125,11 @@ int __init initrd_load(void)
65676 * mounted in the normal path.
65677 */
65678 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
65679- sys_unlink("/initrd.image");
65680+ sys_unlink((const char __force_user *)"/initrd.image");
65681 handle_initrd();
65682 return 1;
65683 }
65684 }
65685- sys_unlink("/initrd.image");
65686+ sys_unlink((const char __force_user *)"/initrd.image");
65687 return 0;
65688 }
65689diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
65690index 8cb6db5..d729f50 100644
65691--- a/init/do_mounts_md.c
65692+++ b/init/do_mounts_md.c
65693@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
65694 partitioned ? "_d" : "", minor,
65695 md_setup_args[ent].device_names);
65696
65697- fd = sys_open(name, 0, 0);
65698+ fd = sys_open((char __force_user *)name, 0, 0);
65699 if (fd < 0) {
65700 printk(KERN_ERR "md: open failed - cannot start "
65701 "array %s\n", name);
65702@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
65703 * array without it
65704 */
65705 sys_close(fd);
65706- fd = sys_open(name, 0, 0);
65707+ fd = sys_open((char __force_user *)name, 0, 0);
65708 sys_ioctl(fd, BLKRRPART, 0);
65709 }
65710 sys_close(fd);
65711@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
65712
65713 wait_for_device_probe();
65714
65715- fd = sys_open("/dev/md0", 0, 0);
65716+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
65717 if (fd >= 0) {
65718 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
65719 sys_close(fd);
65720diff --git a/init/init_task.c b/init/init_task.c
65721index 8b2f399..f0797c9 100644
65722--- a/init/init_task.c
65723+++ b/init/init_task.c
65724@@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
65725 * Initial thread structure. Alignment of this is handled by a special
65726 * linker map entry.
65727 */
65728+#ifdef CONFIG_X86
65729+union thread_union init_thread_union __init_task_data;
65730+#else
65731 union thread_union init_thread_union __init_task_data =
65732 { INIT_THREAD_INFO(init_task) };
65733+#endif
65734diff --git a/init/initramfs.c b/init/initramfs.c
65735index 84c6bf1..8899338 100644
65736--- a/init/initramfs.c
65737+++ b/init/initramfs.c
65738@@ -84,7 +84,7 @@ static void __init free_hash(void)
65739 }
65740 }
65741
65742-static long __init do_utime(char *filename, time_t mtime)
65743+static long __init do_utime(char __force_user *filename, time_t mtime)
65744 {
65745 struct timespec t[2];
65746
65747@@ -119,7 +119,7 @@ static void __init dir_utime(void)
65748 struct dir_entry *de, *tmp;
65749 list_for_each_entry_safe(de, tmp, &dir_list, list) {
65750 list_del(&de->list);
65751- do_utime(de->name, de->mtime);
65752+ do_utime((char __force_user *)de->name, de->mtime);
65753 kfree(de->name);
65754 kfree(de);
65755 }
65756@@ -281,7 +281,7 @@ static int __init maybe_link(void)
65757 if (nlink >= 2) {
65758 char *old = find_link(major, minor, ino, mode, collected);
65759 if (old)
65760- return (sys_link(old, collected) < 0) ? -1 : 1;
65761+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
65762 }
65763 return 0;
65764 }
65765@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
65766 {
65767 struct stat st;
65768
65769- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
65770+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
65771 if (S_ISDIR(st.st_mode))
65772- sys_rmdir(path);
65773+ sys_rmdir((char __force_user *)path);
65774 else
65775- sys_unlink(path);
65776+ sys_unlink((char __force_user *)path);
65777 }
65778 }
65779
65780@@ -315,7 +315,7 @@ static int __init do_name(void)
65781 int openflags = O_WRONLY|O_CREAT;
65782 if (ml != 1)
65783 openflags |= O_TRUNC;
65784- wfd = sys_open(collected, openflags, mode);
65785+ wfd = sys_open((char __force_user *)collected, openflags, mode);
65786
65787 if (wfd >= 0) {
65788 sys_fchown(wfd, uid, gid);
65789@@ -327,17 +327,17 @@ static int __init do_name(void)
65790 }
65791 }
65792 } else if (S_ISDIR(mode)) {
65793- sys_mkdir(collected, mode);
65794- sys_chown(collected, uid, gid);
65795- sys_chmod(collected, mode);
65796+ sys_mkdir((char __force_user *)collected, mode);
65797+ sys_chown((char __force_user *)collected, uid, gid);
65798+ sys_chmod((char __force_user *)collected, mode);
65799 dir_add(collected, mtime);
65800 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
65801 S_ISFIFO(mode) || S_ISSOCK(mode)) {
65802 if (maybe_link() == 0) {
65803- sys_mknod(collected, mode, rdev);
65804- sys_chown(collected, uid, gid);
65805- sys_chmod(collected, mode);
65806- do_utime(collected, mtime);
65807+ sys_mknod((char __force_user *)collected, mode, rdev);
65808+ sys_chown((char __force_user *)collected, uid, gid);
65809+ sys_chmod((char __force_user *)collected, mode);
65810+ do_utime((char __force_user *)collected, mtime);
65811 }
65812 }
65813 return 0;
65814@@ -346,15 +346,15 @@ static int __init do_name(void)
65815 static int __init do_copy(void)
65816 {
65817 if (count >= body_len) {
65818- sys_write(wfd, victim, body_len);
65819+ sys_write(wfd, (char __force_user *)victim, body_len);
65820 sys_close(wfd);
65821- do_utime(vcollected, mtime);
65822+ do_utime((char __force_user *)vcollected, mtime);
65823 kfree(vcollected);
65824 eat(body_len);
65825 state = SkipIt;
65826 return 0;
65827 } else {
65828- sys_write(wfd, victim, count);
65829+ sys_write(wfd, (char __force_user *)victim, count);
65830 body_len -= count;
65831 eat(count);
65832 return 1;
65833@@ -365,9 +365,9 @@ static int __init do_symlink(void)
65834 {
65835 collected[N_ALIGN(name_len) + body_len] = '\0';
65836 clean_path(collected, 0);
65837- sys_symlink(collected + N_ALIGN(name_len), collected);
65838- sys_lchown(collected, uid, gid);
65839- do_utime(collected, mtime);
65840+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
65841+ sys_lchown((char __force_user *)collected, uid, gid);
65842+ do_utime((char __force_user *)collected, mtime);
65843 state = SkipIt;
65844 next_state = Reset;
65845 return 0;
65846diff --git a/init/main.c b/init/main.c
65847index b5cc0a7..8e67244 100644
65848--- a/init/main.c
65849+++ b/init/main.c
65850@@ -95,6 +95,8 @@ static inline void mark_rodata_ro(void) { }
65851 extern void tc_init(void);
65852 #endif
65853
65854+extern void grsecurity_init(void);
65855+
65856 /*
65857 * Debug helper: via this flag we know that we are in 'early bootup code'
65858 * where only the boot processor is running with IRQ disabled. This means
65859@@ -148,6 +150,51 @@ static int __init set_reset_devices(char *str)
65860
65861 __setup("reset_devices", set_reset_devices);
65862
65863+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
65864+extern char pax_enter_kernel_user[];
65865+extern char pax_exit_kernel_user[];
65866+extern pgdval_t clone_pgd_mask;
65867+#endif
65868+
65869+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
65870+static int __init setup_pax_nouderef(char *str)
65871+{
65872+#ifdef CONFIG_X86_32
65873+ unsigned int cpu;
65874+ struct desc_struct *gdt;
65875+
65876+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
65877+ gdt = get_cpu_gdt_table(cpu);
65878+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
65879+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
65880+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
65881+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
65882+ }
65883+ loadsegment(ds, __KERNEL_DS);
65884+ loadsegment(es, __KERNEL_DS);
65885+ loadsegment(ss, __KERNEL_DS);
65886+#else
65887+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
65888+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
65889+ clone_pgd_mask = ~(pgdval_t)0UL;
65890+#endif
65891+
65892+ return 0;
65893+}
65894+early_param("pax_nouderef", setup_pax_nouderef);
65895+#endif
65896+
65897+#ifdef CONFIG_PAX_SOFTMODE
65898+int pax_softmode;
65899+
65900+static int __init setup_pax_softmode(char *str)
65901+{
65902+ get_option(&str, &pax_softmode);
65903+ return 1;
65904+}
65905+__setup("pax_softmode=", setup_pax_softmode);
65906+#endif
65907+
65908 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
65909 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
65910 static const char *panic_later, *panic_param;
65911@@ -674,6 +721,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
65912 {
65913 int count = preempt_count();
65914 int ret;
65915+ const char *msg1 = "", *msg2 = "";
65916
65917 if (initcall_debug)
65918 ret = do_one_initcall_debug(fn);
65919@@ -686,15 +734,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
65920 sprintf(msgbuf, "error code %d ", ret);
65921
65922 if (preempt_count() != count) {
65923- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
65924+ msg1 = " preemption imbalance";
65925 preempt_count() = count;
65926 }
65927 if (irqs_disabled()) {
65928- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
65929+ msg2 = " disabled interrupts";
65930 local_irq_enable();
65931 }
65932- if (msgbuf[0]) {
65933- printk("initcall %pF returned with %s\n", fn, msgbuf);
65934+ if (msgbuf[0] || *msg1 || *msg2) {
65935+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
65936 }
65937
65938 return ret;
65939@@ -747,8 +795,14 @@ static void __init do_initcall_level(int level)
65940 level, level,
65941 &repair_env_string);
65942
65943- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
65944+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
65945 do_one_initcall(*fn);
65946+
65947+#ifdef CONFIG_PAX_LATENT_ENTROPY
65948+ transfer_latent_entropy();
65949+#endif
65950+
65951+ }
65952 }
65953
65954 static void __init do_initcalls(void)
65955@@ -782,8 +836,14 @@ static void __init do_pre_smp_initcalls(void)
65956 {
65957 initcall_t *fn;
65958
65959- for (fn = __initcall_start; fn < __initcall0_start; fn++)
65960+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
65961 do_one_initcall(*fn);
65962+
65963+#ifdef CONFIG_PAX_LATENT_ENTROPY
65964+ transfer_latent_entropy();
65965+#endif
65966+
65967+ }
65968 }
65969
65970 static void run_init_process(const char *init_filename)
65971@@ -865,7 +925,7 @@ static int __init kernel_init(void * unused)
65972 do_basic_setup();
65973
65974 /* Open the /dev/console on the rootfs, this should never fail */
65975- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
65976+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
65977 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
65978
65979 (void) sys_dup(0);
65980@@ -878,11 +938,13 @@ static int __init kernel_init(void * unused)
65981 if (!ramdisk_execute_command)
65982 ramdisk_execute_command = "/init";
65983
65984- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
65985+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
65986 ramdisk_execute_command = NULL;
65987 prepare_namespace();
65988 }
65989
65990+ grsecurity_init();
65991+
65992 /*
65993 * Ok, we have completed the initial bootup, and
65994 * we're essentially up and running. Get rid of the
65995diff --git a/ipc/mqueue.c b/ipc/mqueue.c
65996index 8ce5769..4666884 100644
65997--- a/ipc/mqueue.c
65998+++ b/ipc/mqueue.c
65999@@ -279,6 +279,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
66000 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
66001 info->attr.mq_msgsize);
66002
66003+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
66004 spin_lock(&mq_lock);
66005 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
66006 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
66007diff --git a/ipc/msg.c b/ipc/msg.c
66008index 7385de2..a8180e08 100644
66009--- a/ipc/msg.c
66010+++ b/ipc/msg.c
66011@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
66012 return security_msg_queue_associate(msq, msgflg);
66013 }
66014
66015+static struct ipc_ops msg_ops = {
66016+ .getnew = newque,
66017+ .associate = msg_security,
66018+ .more_checks = NULL
66019+};
66020+
66021 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
66022 {
66023 struct ipc_namespace *ns;
66024- struct ipc_ops msg_ops;
66025 struct ipc_params msg_params;
66026
66027 ns = current->nsproxy->ipc_ns;
66028
66029- msg_ops.getnew = newque;
66030- msg_ops.associate = msg_security;
66031- msg_ops.more_checks = NULL;
66032-
66033 msg_params.key = key;
66034 msg_params.flg = msgflg;
66035
66036diff --git a/ipc/sem.c b/ipc/sem.c
66037index 5215a81..cfc0cac 100644
66038--- a/ipc/sem.c
66039+++ b/ipc/sem.c
66040@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
66041 return 0;
66042 }
66043
66044+static struct ipc_ops sem_ops = {
66045+ .getnew = newary,
66046+ .associate = sem_security,
66047+ .more_checks = sem_more_checks
66048+};
66049+
66050 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
66051 {
66052 struct ipc_namespace *ns;
66053- struct ipc_ops sem_ops;
66054 struct ipc_params sem_params;
66055
66056 ns = current->nsproxy->ipc_ns;
66057@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
66058 if (nsems < 0 || nsems > ns->sc_semmsl)
66059 return -EINVAL;
66060
66061- sem_ops.getnew = newary;
66062- sem_ops.associate = sem_security;
66063- sem_ops.more_checks = sem_more_checks;
66064-
66065 sem_params.key = key;
66066 sem_params.flg = semflg;
66067 sem_params.u.nsems = nsems;
66068diff --git a/ipc/shm.c b/ipc/shm.c
66069index 41c1285..cf6404c 100644
66070--- a/ipc/shm.c
66071+++ b/ipc/shm.c
66072@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
66073 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
66074 #endif
66075
66076+#ifdef CONFIG_GRKERNSEC
66077+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
66078+ const time_t shm_createtime, const uid_t cuid,
66079+ const int shmid);
66080+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
66081+ const time_t shm_createtime);
66082+#endif
66083+
66084 void shm_init_ns(struct ipc_namespace *ns)
66085 {
66086 ns->shm_ctlmax = SHMMAX;
66087@@ -520,6 +528,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
66088 shp->shm_lprid = 0;
66089 shp->shm_atim = shp->shm_dtim = 0;
66090 shp->shm_ctim = get_seconds();
66091+#ifdef CONFIG_GRKERNSEC
66092+ {
66093+ struct timespec timeval;
66094+ do_posix_clock_monotonic_gettime(&timeval);
66095+
66096+ shp->shm_createtime = timeval.tv_sec;
66097+ }
66098+#endif
66099 shp->shm_segsz = size;
66100 shp->shm_nattch = 0;
66101 shp->shm_file = file;
66102@@ -571,18 +587,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
66103 return 0;
66104 }
66105
66106+static struct ipc_ops shm_ops = {
66107+ .getnew = newseg,
66108+ .associate = shm_security,
66109+ .more_checks = shm_more_checks
66110+};
66111+
66112 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
66113 {
66114 struct ipc_namespace *ns;
66115- struct ipc_ops shm_ops;
66116 struct ipc_params shm_params;
66117
66118 ns = current->nsproxy->ipc_ns;
66119
66120- shm_ops.getnew = newseg;
66121- shm_ops.associate = shm_security;
66122- shm_ops.more_checks = shm_more_checks;
66123-
66124 shm_params.key = key;
66125 shm_params.flg = shmflg;
66126 shm_params.u.size = size;
66127@@ -1000,6 +1017,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
66128 f_mode = FMODE_READ | FMODE_WRITE;
66129 }
66130 if (shmflg & SHM_EXEC) {
66131+
66132+#ifdef CONFIG_PAX_MPROTECT
66133+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
66134+ goto out;
66135+#endif
66136+
66137 prot |= PROT_EXEC;
66138 acc_mode |= S_IXUGO;
66139 }
66140@@ -1023,9 +1046,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
66141 if (err)
66142 goto out_unlock;
66143
66144+#ifdef CONFIG_GRKERNSEC
66145+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
66146+ shp->shm_perm.cuid, shmid) ||
66147+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
66148+ err = -EACCES;
66149+ goto out_unlock;
66150+ }
66151+#endif
66152+
66153 path = shp->shm_file->f_path;
66154 path_get(&path);
66155 shp->shm_nattch++;
66156+#ifdef CONFIG_GRKERNSEC
66157+ shp->shm_lapid = current->pid;
66158+#endif
66159 size = i_size_read(path.dentry->d_inode);
66160 shm_unlock(shp);
66161
66162diff --git a/kernel/acct.c b/kernel/acct.c
66163index 02e6167..54824f7 100644
66164--- a/kernel/acct.c
66165+++ b/kernel/acct.c
66166@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
66167 */
66168 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
66169 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
66170- file->f_op->write(file, (char *)&ac,
66171+ file->f_op->write(file, (char __force_user *)&ac,
66172 sizeof(acct_t), &file->f_pos);
66173 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
66174 set_fs(fs);
66175diff --git a/kernel/audit.c b/kernel/audit.c
66176index 1c7f2c6..9ba5359 100644
66177--- a/kernel/audit.c
66178+++ b/kernel/audit.c
66179@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
66180 3) suppressed due to audit_rate_limit
66181 4) suppressed due to audit_backlog_limit
66182 */
66183-static atomic_t audit_lost = ATOMIC_INIT(0);
66184+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
66185
66186 /* The netlink socket. */
66187 static struct sock *audit_sock;
66188@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
66189 unsigned long now;
66190 int print;
66191
66192- atomic_inc(&audit_lost);
66193+ atomic_inc_unchecked(&audit_lost);
66194
66195 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
66196
66197@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
66198 printk(KERN_WARNING
66199 "audit: audit_lost=%d audit_rate_limit=%d "
66200 "audit_backlog_limit=%d\n",
66201- atomic_read(&audit_lost),
66202+ atomic_read_unchecked(&audit_lost),
66203 audit_rate_limit,
66204 audit_backlog_limit);
66205 audit_panic(message);
66206@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
66207 status_set.pid = audit_pid;
66208 status_set.rate_limit = audit_rate_limit;
66209 status_set.backlog_limit = audit_backlog_limit;
66210- status_set.lost = atomic_read(&audit_lost);
66211+ status_set.lost = atomic_read_unchecked(&audit_lost);
66212 status_set.backlog = skb_queue_len(&audit_skb_queue);
66213 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
66214 &status_set, sizeof(status_set));
66215diff --git a/kernel/auditsc.c b/kernel/auditsc.c
66216index 4b96415..d8c16ee 100644
66217--- a/kernel/auditsc.c
66218+++ b/kernel/auditsc.c
66219@@ -2289,7 +2289,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
66220 }
66221
66222 /* global counter which is incremented every time something logs in */
66223-static atomic_t session_id = ATOMIC_INIT(0);
66224+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
66225
66226 /**
66227 * audit_set_loginuid - set current task's audit_context loginuid
66228@@ -2313,7 +2313,7 @@ int audit_set_loginuid(uid_t loginuid)
66229 return -EPERM;
66230 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
66231
66232- sessionid = atomic_inc_return(&session_id);
66233+ sessionid = atomic_inc_return_unchecked(&session_id);
66234 if (context && context->in_syscall) {
66235 struct audit_buffer *ab;
66236
66237diff --git a/kernel/capability.c b/kernel/capability.c
66238index 493d972..ea17248 100644
66239--- a/kernel/capability.c
66240+++ b/kernel/capability.c
66241@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
66242 * before modification is attempted and the application
66243 * fails.
66244 */
66245+ if (tocopy > ARRAY_SIZE(kdata))
66246+ return -EFAULT;
66247+
66248 if (copy_to_user(dataptr, kdata, tocopy
66249 * sizeof(struct __user_cap_data_struct))) {
66250 return -EFAULT;
66251@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
66252 int ret;
66253
66254 rcu_read_lock();
66255- ret = security_capable(__task_cred(t), ns, cap);
66256+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
66257+ gr_task_is_capable(t, __task_cred(t), cap);
66258 rcu_read_unlock();
66259
66260- return (ret == 0);
66261+ return ret;
66262 }
66263
66264 /**
66265@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
66266 int ret;
66267
66268 rcu_read_lock();
66269- ret = security_capable_noaudit(__task_cred(t), ns, cap);
66270+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
66271 rcu_read_unlock();
66272
66273- return (ret == 0);
66274+ return ret;
66275 }
66276
66277 /**
66278@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
66279 BUG();
66280 }
66281
66282- if (security_capable(current_cred(), ns, cap) == 0) {
66283+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
66284 current->flags |= PF_SUPERPRIV;
66285 return true;
66286 }
66287@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
66288 }
66289 EXPORT_SYMBOL(ns_capable);
66290
66291+bool ns_capable_nolog(struct user_namespace *ns, int cap)
66292+{
66293+ if (unlikely(!cap_valid(cap))) {
66294+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
66295+ BUG();
66296+ }
66297+
66298+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
66299+ current->flags |= PF_SUPERPRIV;
66300+ return true;
66301+ }
66302+ return false;
66303+}
66304+EXPORT_SYMBOL(ns_capable_nolog);
66305+
66306 /**
66307 * capable - Determine if the current task has a superior capability in effect
66308 * @cap: The capability to be tested for
66309@@ -408,6 +427,12 @@ bool capable(int cap)
66310 }
66311 EXPORT_SYMBOL(capable);
66312
66313+bool capable_nolog(int cap)
66314+{
66315+ return ns_capable_nolog(&init_user_ns, cap);
66316+}
66317+EXPORT_SYMBOL(capable_nolog);
66318+
66319 /**
66320 * nsown_capable - Check superior capability to one's own user_ns
66321 * @cap: The capability in question
66322@@ -440,3 +465,10 @@ bool inode_capable(const struct inode *inode, int cap)
66323
66324 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
66325 }
66326+
66327+bool inode_capable_nolog(const struct inode *inode, int cap)
66328+{
66329+ struct user_namespace *ns = current_user_ns();
66330+
66331+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
66332+}
66333diff --git a/kernel/compat.c b/kernel/compat.c
66334index c28a306..b4d0cf3 100644
66335--- a/kernel/compat.c
66336+++ b/kernel/compat.c
66337@@ -13,6 +13,7 @@
66338
66339 #include <linux/linkage.h>
66340 #include <linux/compat.h>
66341+#include <linux/module.h>
66342 #include <linux/errno.h>
66343 #include <linux/time.h>
66344 #include <linux/signal.h>
66345@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
66346 mm_segment_t oldfs;
66347 long ret;
66348
66349- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
66350+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
66351 oldfs = get_fs();
66352 set_fs(KERNEL_DS);
66353 ret = hrtimer_nanosleep_restart(restart);
66354@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
66355 oldfs = get_fs();
66356 set_fs(KERNEL_DS);
66357 ret = hrtimer_nanosleep(&tu,
66358- rmtp ? (struct timespec __user *)&rmt : NULL,
66359+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
66360 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
66361 set_fs(oldfs);
66362
66363@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
66364 mm_segment_t old_fs = get_fs();
66365
66366 set_fs(KERNEL_DS);
66367- ret = sys_sigpending((old_sigset_t __user *) &s);
66368+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
66369 set_fs(old_fs);
66370 if (ret == 0)
66371 ret = put_user(s, set);
66372@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
66373 mm_segment_t old_fs = get_fs();
66374
66375 set_fs(KERNEL_DS);
66376- ret = sys_old_getrlimit(resource, &r);
66377+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
66378 set_fs(old_fs);
66379
66380 if (!ret) {
66381@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
66382 mm_segment_t old_fs = get_fs();
66383
66384 set_fs(KERNEL_DS);
66385- ret = sys_getrusage(who, (struct rusage __user *) &r);
66386+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
66387 set_fs(old_fs);
66388
66389 if (ret)
66390@@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
66391 set_fs (KERNEL_DS);
66392 ret = sys_wait4(pid,
66393 (stat_addr ?
66394- (unsigned int __user *) &status : NULL),
66395- options, (struct rusage __user *) &r);
66396+ (unsigned int __force_user *) &status : NULL),
66397+ options, (struct rusage __force_user *) &r);
66398 set_fs (old_fs);
66399
66400 if (ret > 0) {
66401@@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
66402 memset(&info, 0, sizeof(info));
66403
66404 set_fs(KERNEL_DS);
66405- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
66406- uru ? (struct rusage __user *)&ru : NULL);
66407+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
66408+ uru ? (struct rusage __force_user *)&ru : NULL);
66409 set_fs(old_fs);
66410
66411 if ((ret < 0) || (info.si_signo == 0))
66412@@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
66413 oldfs = get_fs();
66414 set_fs(KERNEL_DS);
66415 err = sys_timer_settime(timer_id, flags,
66416- (struct itimerspec __user *) &newts,
66417- (struct itimerspec __user *) &oldts);
66418+ (struct itimerspec __force_user *) &newts,
66419+ (struct itimerspec __force_user *) &oldts);
66420 set_fs(oldfs);
66421 if (!err && old && put_compat_itimerspec(old, &oldts))
66422 return -EFAULT;
66423@@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
66424 oldfs = get_fs();
66425 set_fs(KERNEL_DS);
66426 err = sys_timer_gettime(timer_id,
66427- (struct itimerspec __user *) &ts);
66428+ (struct itimerspec __force_user *) &ts);
66429 set_fs(oldfs);
66430 if (!err && put_compat_itimerspec(setting, &ts))
66431 return -EFAULT;
66432@@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
66433 oldfs = get_fs();
66434 set_fs(KERNEL_DS);
66435 err = sys_clock_settime(which_clock,
66436- (struct timespec __user *) &ts);
66437+ (struct timespec __force_user *) &ts);
66438 set_fs(oldfs);
66439 return err;
66440 }
66441@@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
66442 oldfs = get_fs();
66443 set_fs(KERNEL_DS);
66444 err = sys_clock_gettime(which_clock,
66445- (struct timespec __user *) &ts);
66446+ (struct timespec __force_user *) &ts);
66447 set_fs(oldfs);
66448 if (!err && put_compat_timespec(&ts, tp))
66449 return -EFAULT;
66450@@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
66451
66452 oldfs = get_fs();
66453 set_fs(KERNEL_DS);
66454- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
66455+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
66456 set_fs(oldfs);
66457
66458 err = compat_put_timex(utp, &txc);
66459@@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
66460 oldfs = get_fs();
66461 set_fs(KERNEL_DS);
66462 err = sys_clock_getres(which_clock,
66463- (struct timespec __user *) &ts);
66464+ (struct timespec __force_user *) &ts);
66465 set_fs(oldfs);
66466 if (!err && tp && put_compat_timespec(&ts, tp))
66467 return -EFAULT;
66468@@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
66469 long err;
66470 mm_segment_t oldfs;
66471 struct timespec tu;
66472- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
66473+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
66474
66475- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
66476+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
66477 oldfs = get_fs();
66478 set_fs(KERNEL_DS);
66479 err = clock_nanosleep_restart(restart);
66480@@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
66481 oldfs = get_fs();
66482 set_fs(KERNEL_DS);
66483 err = sys_clock_nanosleep(which_clock, flags,
66484- (struct timespec __user *) &in,
66485- (struct timespec __user *) &out);
66486+ (struct timespec __force_user *) &in,
66487+ (struct timespec __force_user *) &out);
66488 set_fs(oldfs);
66489
66490 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
66491diff --git a/kernel/configs.c b/kernel/configs.c
66492index 42e8fa0..9e7406b 100644
66493--- a/kernel/configs.c
66494+++ b/kernel/configs.c
66495@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
66496 struct proc_dir_entry *entry;
66497
66498 /* create the current config file */
66499+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66500+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
66501+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
66502+ &ikconfig_file_ops);
66503+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66504+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
66505+ &ikconfig_file_ops);
66506+#endif
66507+#else
66508 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
66509 &ikconfig_file_ops);
66510+#endif
66511+
66512 if (!entry)
66513 return -ENOMEM;
66514
66515diff --git a/kernel/cred.c b/kernel/cred.c
66516index de728ac..e3c267c 100644
66517--- a/kernel/cred.c
66518+++ b/kernel/cred.c
66519@@ -207,6 +207,16 @@ void exit_creds(struct task_struct *tsk)
66520 validate_creds(cred);
66521 alter_cred_subscribers(cred, -1);
66522 put_cred(cred);
66523+
66524+#ifdef CONFIG_GRKERNSEC_SETXID
66525+ cred = (struct cred *) tsk->delayed_cred;
66526+ if (cred != NULL) {
66527+ tsk->delayed_cred = NULL;
66528+ validate_creds(cred);
66529+ alter_cred_subscribers(cred, -1);
66530+ put_cred(cred);
66531+ }
66532+#endif
66533 }
66534
66535 /**
66536@@ -469,7 +479,7 @@ error_put:
66537 * Always returns 0 thus allowing this function to be tail-called at the end
66538 * of, say, sys_setgid().
66539 */
66540-int commit_creds(struct cred *new)
66541+static int __commit_creds(struct cred *new)
66542 {
66543 struct task_struct *task = current;
66544 const struct cred *old = task->real_cred;
66545@@ -488,6 +498,8 @@ int commit_creds(struct cred *new)
66546
66547 get_cred(new); /* we will require a ref for the subj creds too */
66548
66549+ gr_set_role_label(task, new->uid, new->gid);
66550+
66551 /* dumpability changes */
66552 if (!uid_eq(old->euid, new->euid) ||
66553 !gid_eq(old->egid, new->egid) ||
66554@@ -537,6 +549,101 @@ int commit_creds(struct cred *new)
66555 put_cred(old);
66556 return 0;
66557 }
66558+#ifdef CONFIG_GRKERNSEC_SETXID
66559+extern int set_user(struct cred *new);
66560+
66561+void gr_delayed_cred_worker(void)
66562+{
66563+ const struct cred *new = current->delayed_cred;
66564+ struct cred *ncred;
66565+
66566+ current->delayed_cred = NULL;
66567+
66568+ if (current_uid() && new != NULL) {
66569+ // from doing get_cred on it when queueing this
66570+ put_cred(new);
66571+ return;
66572+ } else if (new == NULL)
66573+ return;
66574+
66575+ ncred = prepare_creds();
66576+ if (!ncred)
66577+ goto die;
66578+ // uids
66579+ ncred->uid = new->uid;
66580+ ncred->euid = new->euid;
66581+ ncred->suid = new->suid;
66582+ ncred->fsuid = new->fsuid;
66583+ // gids
66584+ ncred->gid = new->gid;
66585+ ncred->egid = new->egid;
66586+ ncred->sgid = new->sgid;
66587+ ncred->fsgid = new->fsgid;
66588+ // groups
66589+ if (set_groups(ncred, new->group_info) < 0) {
66590+ abort_creds(ncred);
66591+ goto die;
66592+ }
66593+ // caps
66594+ ncred->securebits = new->securebits;
66595+ ncred->cap_inheritable = new->cap_inheritable;
66596+ ncred->cap_permitted = new->cap_permitted;
66597+ ncred->cap_effective = new->cap_effective;
66598+ ncred->cap_bset = new->cap_bset;
66599+
66600+ if (set_user(ncred)) {
66601+ abort_creds(ncred);
66602+ goto die;
66603+ }
66604+
66605+ // from doing get_cred on it when queueing this
66606+ put_cred(new);
66607+
66608+ __commit_creds(ncred);
66609+ return;
66610+die:
66611+ // from doing get_cred on it when queueing this
66612+ put_cred(new);
66613+ do_group_exit(SIGKILL);
66614+}
66615+#endif
66616+
66617+int commit_creds(struct cred *new)
66618+{
66619+#ifdef CONFIG_GRKERNSEC_SETXID
66620+ int ret;
66621+ int schedule_it = 0;
66622+ struct task_struct *t;
66623+
66624+ /* we won't get called with tasklist_lock held for writing
66625+ and interrupts disabled as the cred struct in that case is
66626+ init_cred
66627+ */
66628+ if (grsec_enable_setxid && !current_is_single_threaded() &&
66629+ !current_uid() && new->uid) {
66630+ schedule_it = 1;
66631+ }
66632+ ret = __commit_creds(new);
66633+ if (schedule_it) {
66634+ rcu_read_lock();
66635+ read_lock(&tasklist_lock);
66636+ for (t = next_thread(current); t != current;
66637+ t = next_thread(t)) {
66638+ if (t->delayed_cred == NULL) {
66639+ t->delayed_cred = get_cred(new);
66640+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
66641+ set_tsk_need_resched(t);
66642+ }
66643+ }
66644+ read_unlock(&tasklist_lock);
66645+ rcu_read_unlock();
66646+ }
66647+ return ret;
66648+#else
66649+ return __commit_creds(new);
66650+#endif
66651+}
66652+
66653 EXPORT_SYMBOL(commit_creds);
66654
66655 /**
66656diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
66657index 0557f24..1a00d9a 100644
66658--- a/kernel/debug/debug_core.c
66659+++ b/kernel/debug/debug_core.c
66660@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
66661 */
66662 static atomic_t masters_in_kgdb;
66663 static atomic_t slaves_in_kgdb;
66664-static atomic_t kgdb_break_tasklet_var;
66665+static atomic_unchecked_t kgdb_break_tasklet_var;
66666 atomic_t kgdb_setting_breakpoint;
66667
66668 struct task_struct *kgdb_usethread;
66669@@ -132,7 +132,7 @@ int kgdb_single_step;
66670 static pid_t kgdb_sstep_pid;
66671
66672 /* to keep track of the CPU which is doing the single stepping*/
66673-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
66674+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
66675
66676 /*
66677 * If you are debugging a problem where roundup (the collection of
66678@@ -540,7 +540,7 @@ return_normal:
66679 * kernel will only try for the value of sstep_tries before
66680 * giving up and continuing on.
66681 */
66682- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
66683+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
66684 (kgdb_info[cpu].task &&
66685 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
66686 atomic_set(&kgdb_active, -1);
66687@@ -634,8 +634,8 @@ cpu_master_loop:
66688 }
66689
66690 kgdb_restore:
66691- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
66692- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
66693+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
66694+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
66695 if (kgdb_info[sstep_cpu].task)
66696 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
66697 else
66698@@ -861,18 +861,18 @@ static void kgdb_unregister_callbacks(void)
66699 static void kgdb_tasklet_bpt(unsigned long ing)
66700 {
66701 kgdb_breakpoint();
66702- atomic_set(&kgdb_break_tasklet_var, 0);
66703+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
66704 }
66705
66706 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
66707
66708 void kgdb_schedule_breakpoint(void)
66709 {
66710- if (atomic_read(&kgdb_break_tasklet_var) ||
66711+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
66712 atomic_read(&kgdb_active) != -1 ||
66713 atomic_read(&kgdb_setting_breakpoint))
66714 return;
66715- atomic_inc(&kgdb_break_tasklet_var);
66716+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
66717 tasklet_schedule(&kgdb_tasklet_breakpoint);
66718 }
66719 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
66720diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
66721index 1f91413..362a0a1 100644
66722--- a/kernel/debug/kdb/kdb_main.c
66723+++ b/kernel/debug/kdb/kdb_main.c
66724@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
66725 list_for_each_entry(mod, kdb_modules, list) {
66726
66727 kdb_printf("%-20s%8u 0x%p ", mod->name,
66728- mod->core_size, (void *)mod);
66729+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
66730 #ifdef CONFIG_MODULE_UNLOAD
66731 kdb_printf("%4ld ", module_refcount(mod));
66732 #endif
66733@@ -1994,7 +1994,7 @@ static int kdb_lsmod(int argc, const char **argv)
66734 kdb_printf(" (Loading)");
66735 else
66736 kdb_printf(" (Live)");
66737- kdb_printf(" 0x%p", mod->module_core);
66738+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
66739
66740 #ifdef CONFIG_MODULE_UNLOAD
66741 {
66742diff --git a/kernel/events/core.c b/kernel/events/core.c
66743index d7d71d6..b6ec863 100644
66744--- a/kernel/events/core.c
66745+++ b/kernel/events/core.c
66746@@ -181,7 +181,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
66747 return 0;
66748 }
66749
66750-static atomic64_t perf_event_id;
66751+static atomic64_unchecked_t perf_event_id;
66752
66753 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
66754 enum event_type_t event_type);
66755@@ -2663,7 +2663,7 @@ static void __perf_event_read(void *info)
66756
66757 static inline u64 perf_event_count(struct perf_event *event)
66758 {
66759- return local64_read(&event->count) + atomic64_read(&event->child_count);
66760+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
66761 }
66762
66763 static u64 perf_event_read(struct perf_event *event)
66764@@ -2933,12 +2933,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
66765 /*
66766 * Called when the last reference to the file is gone.
66767 */
66768-static int perf_release(struct inode *inode, struct file *file)
66769+static void put_event(struct perf_event *event)
66770 {
66771- struct perf_event *event = file->private_data;
66772 struct task_struct *owner;
66773
66774- file->private_data = NULL;
66775+ if (!atomic_long_dec_and_test(&event->refcount))
66776+ return;
66777
66778 rcu_read_lock();
66779 owner = ACCESS_ONCE(event->owner);
66780@@ -2973,7 +2973,13 @@ static int perf_release(struct inode *inode, struct file *file)
66781 put_task_struct(owner);
66782 }
66783
66784- return perf_event_release_kernel(event);
66785+ perf_event_release_kernel(event);
66786+}
66787+
66788+static int perf_release(struct inode *inode, struct file *file)
66789+{
66790+ put_event(file->private_data);
66791+ return 0;
66792 }
66793
66794 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
66795@@ -2987,9 +2993,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
66796 mutex_lock(&event->child_mutex);
66797 total += perf_event_read(event);
66798 *enabled += event->total_time_enabled +
66799- atomic64_read(&event->child_total_time_enabled);
66800+ atomic64_read_unchecked(&event->child_total_time_enabled);
66801 *running += event->total_time_running +
66802- atomic64_read(&event->child_total_time_running);
66803+ atomic64_read_unchecked(&event->child_total_time_running);
66804
66805 list_for_each_entry(child, &event->child_list, child_list) {
66806 total += perf_event_read(child);
66807@@ -3225,7 +3231,7 @@ unlock:
66808
66809 static const struct file_operations perf_fops;
66810
66811-static struct perf_event *perf_fget_light(int fd, int *fput_needed)
66812+static struct file *perf_fget_light(int fd, int *fput_needed)
66813 {
66814 struct file *file;
66815
66816@@ -3239,7 +3245,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)
66817 return ERR_PTR(-EBADF);
66818 }
66819
66820- return file->private_data;
66821+ return file;
66822 }
66823
66824 static int perf_event_set_output(struct perf_event *event,
66825@@ -3271,19 +3277,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
66826
66827 case PERF_EVENT_IOC_SET_OUTPUT:
66828 {
66829+ struct file *output_file = NULL;
66830 struct perf_event *output_event = NULL;
66831 int fput_needed = 0;
66832 int ret;
66833
66834 if (arg != -1) {
66835- output_event = perf_fget_light(arg, &fput_needed);
66836- if (IS_ERR(output_event))
66837- return PTR_ERR(output_event);
66838+ output_file = perf_fget_light(arg, &fput_needed);
66839+ if (IS_ERR(output_file))
66840+ return PTR_ERR(output_file);
66841+ output_event = output_file->private_data;
66842 }
66843
66844 ret = perf_event_set_output(event, output_event);
66845 if (output_event)
66846- fput_light(output_event->filp, fput_needed);
66847+ fput_light(output_file, fput_needed);
66848
66849 return ret;
66850 }
66851@@ -3396,10 +3404,10 @@ void perf_event_update_userpage(struct perf_event *event)
66852 userpg->offset -= local64_read(&event->hw.prev_count);
66853
66854 userpg->time_enabled = enabled +
66855- atomic64_read(&event->child_total_time_enabled);
66856+ atomic64_read_unchecked(&event->child_total_time_enabled);
66857
66858 userpg->time_running = running +
66859- atomic64_read(&event->child_total_time_running);
66860+ atomic64_read_unchecked(&event->child_total_time_running);
66861
66862 arch_perf_update_userpage(userpg, now);
66863
66864@@ -3832,11 +3840,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
66865 values[n++] = perf_event_count(event);
66866 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
66867 values[n++] = enabled +
66868- atomic64_read(&event->child_total_time_enabled);
66869+ atomic64_read_unchecked(&event->child_total_time_enabled);
66870 }
66871 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
66872 values[n++] = running +
66873- atomic64_read(&event->child_total_time_running);
66874+ atomic64_read_unchecked(&event->child_total_time_running);
66875 }
66876 if (read_format & PERF_FORMAT_ID)
66877 values[n++] = primary_event_id(event);
66878@@ -4514,12 +4522,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
66879 * need to add enough zero bytes after the string to handle
66880 * the 64bit alignment we do later.
66881 */
66882- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
66883+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
66884 if (!buf) {
66885 name = strncpy(tmp, "//enomem", sizeof(tmp));
66886 goto got_name;
66887 }
66888- name = d_path(&file->f_path, buf, PATH_MAX);
66889+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
66890 if (IS_ERR(name)) {
66891 name = strncpy(tmp, "//toolong", sizeof(tmp));
66892 goto got_name;
66893@@ -5922,6 +5930,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
66894
66895 mutex_init(&event->mmap_mutex);
66896
66897+ atomic_long_set(&event->refcount, 1);
66898 event->cpu = cpu;
66899 event->attr = *attr;
66900 event->group_leader = group_leader;
66901@@ -5931,7 +5940,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
66902 event->parent = parent_event;
66903
66904 event->ns = get_pid_ns(current->nsproxy->pid_ns);
66905- event->id = atomic64_inc_return(&perf_event_id);
66906+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
66907
66908 event->state = PERF_EVENT_STATE_INACTIVE;
66909
66910@@ -6232,12 +6241,12 @@ SYSCALL_DEFINE5(perf_event_open,
66911 return event_fd;
66912
66913 if (group_fd != -1) {
66914- group_leader = perf_fget_light(group_fd, &fput_needed);
66915- if (IS_ERR(group_leader)) {
66916- err = PTR_ERR(group_leader);
66917+ group_file = perf_fget_light(group_fd, &fput_needed);
66918+ if (IS_ERR(group_file)) {
66919+ err = PTR_ERR(group_file);
66920 goto err_fd;
66921 }
66922- group_file = group_leader->filp;
66923+ group_leader = group_file->private_data;
66924 if (flags & PERF_FLAG_FD_OUTPUT)
66925 output_event = group_leader;
66926 if (flags & PERF_FLAG_FD_NO_GROUP)
66927@@ -6372,7 +6381,6 @@ SYSCALL_DEFINE5(perf_event_open,
66928 put_ctx(gctx);
66929 }
66930
66931- event->filp = event_file;
66932 WARN_ON_ONCE(ctx->parent_ctx);
66933 mutex_lock(&ctx->mutex);
66934
66935@@ -6462,7 +6470,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
66936 goto err_free;
66937 }
66938
66939- event->filp = NULL;
66940 WARN_ON_ONCE(ctx->parent_ctx);
66941 mutex_lock(&ctx->mutex);
66942 perf_install_in_context(ctx, event, cpu);
66943@@ -6493,10 +6500,10 @@ static void sync_child_event(struct perf_event *child_event,
66944 /*
66945 * Add back the child's count to the parent's count:
66946 */
66947- atomic64_add(child_val, &parent_event->child_count);
66948- atomic64_add(child_event->total_time_enabled,
66949+ atomic64_add_unchecked(child_val, &parent_event->child_count);
66950+ atomic64_add_unchecked(child_event->total_time_enabled,
66951 &parent_event->child_total_time_enabled);
66952- atomic64_add(child_event->total_time_running,
66953+ atomic64_add_unchecked(child_event->total_time_running,
66954 &parent_event->child_total_time_running);
66955
66956 /*
66957@@ -6511,7 +6518,7 @@ static void sync_child_event(struct perf_event *child_event,
66958 * Release the parent event, if this was the last
66959 * reference to it.
66960 */
66961- fput(parent_event->filp);
66962+ put_event(parent_event);
66963 }
66964
66965 static void
66966@@ -6587,9 +6594,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
66967 *
66968 * __perf_event_exit_task()
66969 * sync_child_event()
66970- * fput(parent_event->filp)
66971- * perf_release()
66972- * mutex_lock(&ctx->mutex)
66973+ * put_event()
66974+ * mutex_lock(&ctx->mutex)
66975 *
66976 * But since its the parent context it won't be the same instance.
66977 */
66978@@ -6657,7 +6663,7 @@ static void perf_free_event(struct perf_event *event,
66979 list_del_init(&event->child_list);
66980 mutex_unlock(&parent->child_mutex);
66981
66982- fput(parent->filp);
66983+ put_event(parent);
66984
66985 perf_group_detach(event);
66986 list_del_event(event, ctx);
66987@@ -6737,6 +6743,12 @@ inherit_event(struct perf_event *parent_event,
66988 NULL, NULL);
66989 if (IS_ERR(child_event))
66990 return child_event;
66991+
66992+ if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
66993+ free_event(child_event);
66994+ return NULL;
66995+ }
66996+
66997 get_ctx(child_ctx);
66998
66999 /*
67000@@ -6778,14 +6790,6 @@ inherit_event(struct perf_event *parent_event,
67001 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
67002
67003 /*
67004- * Get a reference to the parent filp - we will fput it
67005- * when the child event exits. This is safe to do because
67006- * we are in the parent and we know that the filp still
67007- * exists and has a nonzero count:
67008- */
67009- atomic_long_inc(&parent_event->filp->f_count);
67010-
67011- /*
67012 * Link this into the parent event's child list
67013 */
67014 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
67015diff --git a/kernel/exit.c b/kernel/exit.c
67016index 46ce8da..c648f3a 100644
67017--- a/kernel/exit.c
67018+++ b/kernel/exit.c
67019@@ -59,6 +59,10 @@
67020 #include <asm/pgtable.h>
67021 #include <asm/mmu_context.h>
67022
67023+#ifdef CONFIG_GRKERNSEC
67024+extern rwlock_t grsec_exec_file_lock;
67025+#endif
67026+
67027 static void exit_mm(struct task_struct * tsk);
67028
67029 static void __unhash_process(struct task_struct *p, bool group_dead)
67030@@ -182,6 +186,10 @@ void release_task(struct task_struct * p)
67031 struct task_struct *leader;
67032 int zap_leader;
67033 repeat:
67034+#ifdef CONFIG_NET
67035+ gr_del_task_from_ip_table(p);
67036+#endif
67037+
67038 /* don't need to get the RCU readlock here - the process is dead and
67039 * can't be modifying its own credentials. But shut RCU-lockdep up */
67040 rcu_read_lock();
67041@@ -394,7 +402,7 @@ int allow_signal(int sig)
67042 * know it'll be handled, so that they don't get converted to
67043 * SIGKILL or just silently dropped.
67044 */
67045- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
67046+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
67047 recalc_sigpending();
67048 spin_unlock_irq(&current->sighand->siglock);
67049 return 0;
67050@@ -430,6 +438,17 @@ void daemonize(const char *name, ...)
67051 vsnprintf(current->comm, sizeof(current->comm), name, args);
67052 va_end(args);
67053
67054+#ifdef CONFIG_GRKERNSEC
67055+ write_lock(&grsec_exec_file_lock);
67056+ if (current->exec_file) {
67057+ fput(current->exec_file);
67058+ current->exec_file = NULL;
67059+ }
67060+ write_unlock(&grsec_exec_file_lock);
67061+#endif
67062+
67063+ gr_set_kernel_label(current);
67064+
67065 /*
67066 * If we were started as result of loading a module, close all of the
67067 * user space pages. We don't need them, and if we didn't close them
67068@@ -907,6 +926,8 @@ void do_exit(long code)
67069 struct task_struct *tsk = current;
67070 int group_dead;
67071
67072+ set_fs(USER_DS);
67073+
67074 profile_task_exit(tsk);
67075
67076 WARN_ON(blk_needs_flush_plug(tsk));
67077@@ -923,7 +944,6 @@ void do_exit(long code)
67078 * mm_release()->clear_child_tid() from writing to a user-controlled
67079 * kernel address.
67080 */
67081- set_fs(USER_DS);
67082
67083 ptrace_event(PTRACE_EVENT_EXIT, code);
67084
67085@@ -985,6 +1005,9 @@ void do_exit(long code)
67086 tsk->exit_code = code;
67087 taskstats_exit(tsk, group_dead);
67088
67089+ gr_acl_handle_psacct(tsk, code);
67090+ gr_acl_handle_exit();
67091+
67092 exit_mm(tsk);
67093
67094 if (group_dead)
67095@@ -1101,7 +1124,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
67096 * Take down every thread in the group. This is called by fatal signals
67097 * as well as by sys_exit_group (below).
67098 */
67099-void
67100+__noreturn void
67101 do_group_exit(int exit_code)
67102 {
67103 struct signal_struct *sig = current->signal;
67104diff --git a/kernel/fork.c b/kernel/fork.c
67105index f9d0499..e4f8f44 100644
67106--- a/kernel/fork.c
67107+++ b/kernel/fork.c
67108@@ -321,7 +321,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
67109 *stackend = STACK_END_MAGIC; /* for overflow detection */
67110
67111 #ifdef CONFIG_CC_STACKPROTECTOR
67112- tsk->stack_canary = get_random_int();
67113+ tsk->stack_canary = pax_get_random_long();
67114 #endif
67115
67116 /*
67117@@ -345,13 +345,78 @@ out:
67118 }
67119
67120 #ifdef CONFIG_MMU
67121+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
67122+{
67123+ struct vm_area_struct *tmp;
67124+ unsigned long charge;
67125+ struct mempolicy *pol;
67126+ struct file *file;
67127+
67128+ charge = 0;
67129+ if (mpnt->vm_flags & VM_ACCOUNT) {
67130+ unsigned long len;
67131+ len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
67132+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
67133+ goto fail_nomem;
67134+ charge = len;
67135+ }
67136+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
67137+ if (!tmp)
67138+ goto fail_nomem;
67139+ *tmp = *mpnt;
67140+ tmp->vm_mm = mm;
67141+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
67142+ pol = mpol_dup(vma_policy(mpnt));
67143+ if (IS_ERR(pol))
67144+ goto fail_nomem_policy;
67145+ vma_set_policy(tmp, pol);
67146+ if (anon_vma_fork(tmp, mpnt))
67147+ goto fail_nomem_anon_vma_fork;
67148+ tmp->vm_flags &= ~VM_LOCKED;
67149+ tmp->vm_next = tmp->vm_prev = NULL;
67150+ tmp->vm_mirror = NULL;
67151+ file = tmp->vm_file;
67152+ if (file) {
67153+ struct inode *inode = file->f_path.dentry->d_inode;
67154+ struct address_space *mapping = file->f_mapping;
67155+
67156+ get_file(file);
67157+ if (tmp->vm_flags & VM_DENYWRITE)
67158+ atomic_dec(&inode->i_writecount);
67159+ mutex_lock(&mapping->i_mmap_mutex);
67160+ if (tmp->vm_flags & VM_SHARED)
67161+ mapping->i_mmap_writable++;
67162+ flush_dcache_mmap_lock(mapping);
67163+ /* insert tmp into the share list, just after mpnt */
67164+ vma_prio_tree_add(tmp, mpnt);
67165+ flush_dcache_mmap_unlock(mapping);
67166+ mutex_unlock(&mapping->i_mmap_mutex);
67167+ }
67168+
67169+ /*
67170+ * Clear hugetlb-related page reserves for children. This only
67171+ * affects MAP_PRIVATE mappings. Faults generated by the child
67172+ * are not guaranteed to succeed, even if read-only
67173+ */
67174+ if (is_vm_hugetlb_page(tmp))
67175+ reset_vma_resv_huge_pages(tmp);
67176+
67177+ return tmp;
67178+
67179+fail_nomem_anon_vma_fork:
67180+ mpol_put(pol);
67181+fail_nomem_policy:
67182+ kmem_cache_free(vm_area_cachep, tmp);
67183+fail_nomem:
67184+ vm_unacct_memory(charge);
67185+ return NULL;
67186+}
67187+
67188 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67189 {
67190 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
67191 struct rb_node **rb_link, *rb_parent;
67192 int retval;
67193- unsigned long charge;
67194- struct mempolicy *pol;
67195
67196 down_write(&oldmm->mmap_sem);
67197 flush_cache_dup_mm(oldmm);
67198@@ -363,8 +428,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67199 mm->locked_vm = 0;
67200 mm->mmap = NULL;
67201 mm->mmap_cache = NULL;
67202- mm->free_area_cache = oldmm->mmap_base;
67203- mm->cached_hole_size = ~0UL;
67204+ mm->free_area_cache = oldmm->free_area_cache;
67205+ mm->cached_hole_size = oldmm->cached_hole_size;
67206 mm->map_count = 0;
67207 cpumask_clear(mm_cpumask(mm));
67208 mm->mm_rb = RB_ROOT;
67209@@ -380,8 +445,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67210
67211 prev = NULL;
67212 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
67213- struct file *file;
67214-
67215 if (mpnt->vm_flags & VM_DONTCOPY) {
67216 long pages = vma_pages(mpnt);
67217 mm->total_vm -= pages;
67218@@ -389,54 +452,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67219 -pages);
67220 continue;
67221 }
67222- charge = 0;
67223- if (mpnt->vm_flags & VM_ACCOUNT) {
67224- unsigned long len;
67225- len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
67226- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
67227- goto fail_nomem;
67228- charge = len;
67229+ tmp = dup_vma(mm, oldmm, mpnt);
67230+ if (!tmp) {
67231+ retval = -ENOMEM;
67232+ goto out;
67233 }
67234- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
67235- if (!tmp)
67236- goto fail_nomem;
67237- *tmp = *mpnt;
67238- INIT_LIST_HEAD(&tmp->anon_vma_chain);
67239- pol = mpol_dup(vma_policy(mpnt));
67240- retval = PTR_ERR(pol);
67241- if (IS_ERR(pol))
67242- goto fail_nomem_policy;
67243- vma_set_policy(tmp, pol);
67244- tmp->vm_mm = mm;
67245- if (anon_vma_fork(tmp, mpnt))
67246- goto fail_nomem_anon_vma_fork;
67247- tmp->vm_flags &= ~VM_LOCKED;
67248- tmp->vm_next = tmp->vm_prev = NULL;
67249- file = tmp->vm_file;
67250- if (file) {
67251- struct inode *inode = file->f_path.dentry->d_inode;
67252- struct address_space *mapping = file->f_mapping;
67253-
67254- get_file(file);
67255- if (tmp->vm_flags & VM_DENYWRITE)
67256- atomic_dec(&inode->i_writecount);
67257- mutex_lock(&mapping->i_mmap_mutex);
67258- if (tmp->vm_flags & VM_SHARED)
67259- mapping->i_mmap_writable++;
67260- flush_dcache_mmap_lock(mapping);
67261- /* insert tmp into the share list, just after mpnt */
67262- vma_prio_tree_add(tmp, mpnt);
67263- flush_dcache_mmap_unlock(mapping);
67264- mutex_unlock(&mapping->i_mmap_mutex);
67265- }
67266-
67267- /*
67268- * Clear hugetlb-related page reserves for children. This only
67269- * affects MAP_PRIVATE mappings. Faults generated by the child
67270- * are not guaranteed to succeed, even if read-only
67271- */
67272- if (is_vm_hugetlb_page(tmp))
67273- reset_vma_resv_huge_pages(tmp);
67274
67275 /*
67276 * Link in the new vma and copy the page table entries.
67277@@ -459,9 +479,34 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
67278 if (retval)
67279 goto out;
67280
67281- if (file)
67282+ if (tmp->vm_file)
67283 uprobe_mmap(tmp);
67284 }
67285+
67286+#ifdef CONFIG_PAX_SEGMEXEC
67287+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
67288+ struct vm_area_struct *mpnt_m;
67289+
67290+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
67291+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
67292+
67293+ if (!mpnt->vm_mirror)
67294+ continue;
67295+
67296+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
67297+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
67298+ mpnt->vm_mirror = mpnt_m;
67299+ } else {
67300+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
67301+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
67302+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
67303+ mpnt->vm_mirror->vm_mirror = mpnt;
67304+ }
67305+ }
67306+ BUG_ON(mpnt_m);
67307+ }
67308+#endif
67309+
67310 /* a new mm has just been created */
67311 arch_dup_mmap(oldmm, mm);
67312 retval = 0;
67313@@ -470,14 +515,6 @@ out:
67314 flush_tlb_mm(oldmm);
67315 up_write(&oldmm->mmap_sem);
67316 return retval;
67317-fail_nomem_anon_vma_fork:
67318- mpol_put(pol);
67319-fail_nomem_policy:
67320- kmem_cache_free(vm_area_cachep, tmp);
67321-fail_nomem:
67322- retval = -ENOMEM;
67323- vm_unacct_memory(charge);
67324- goto out;
67325 }
67326
67327 static inline int mm_alloc_pgd(struct mm_struct *mm)
67328@@ -714,8 +751,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
67329 return ERR_PTR(err);
67330
67331 mm = get_task_mm(task);
67332- if (mm && mm != current->mm &&
67333- !ptrace_may_access(task, mode)) {
67334+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
67335+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
67336 mmput(mm);
67337 mm = ERR_PTR(-EACCES);
67338 }
67339@@ -936,13 +973,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
67340 spin_unlock(&fs->lock);
67341 return -EAGAIN;
67342 }
67343- fs->users++;
67344+ atomic_inc(&fs->users);
67345 spin_unlock(&fs->lock);
67346 return 0;
67347 }
67348 tsk->fs = copy_fs_struct(fs);
67349 if (!tsk->fs)
67350 return -ENOMEM;
67351+ gr_set_chroot_entries(tsk, &tsk->fs->root);
67352 return 0;
67353 }
67354
67355@@ -1209,6 +1247,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
67356 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
67357 #endif
67358 retval = -EAGAIN;
67359+
67360+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
67361+
67362 if (atomic_read(&p->real_cred->user->processes) >=
67363 task_rlimit(p, RLIMIT_NPROC)) {
67364 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
67365@@ -1431,6 +1472,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
67366 /* Need tasklist lock for parent etc handling! */
67367 write_lock_irq(&tasklist_lock);
67368
67369+ /* synchronizes with gr_set_acls() */
67370+ gr_copy_label(p);
67371+
67372 /* CLONE_PARENT re-uses the old parent */
67373 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
67374 p->real_parent = current->real_parent;
67375@@ -1541,6 +1585,8 @@ bad_fork_cleanup_count:
67376 bad_fork_free:
67377 free_task(p);
67378 fork_out:
67379+ gr_log_forkfail(retval);
67380+
67381 return ERR_PTR(retval);
67382 }
67383
67384@@ -1641,6 +1687,8 @@ long do_fork(unsigned long clone_flags,
67385 if (clone_flags & CLONE_PARENT_SETTID)
67386 put_user(nr, parent_tidptr);
67387
67388+ gr_handle_brute_check();
67389+
67390 if (clone_flags & CLONE_VFORK) {
67391 p->vfork_done = &vfork;
67392 init_completion(&vfork);
67393@@ -1739,7 +1787,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
67394 return 0;
67395
67396 /* don't need lock here; in the worst case we'll do useless copy */
67397- if (fs->users == 1)
67398+ if (atomic_read(&fs->users) == 1)
67399 return 0;
67400
67401 *new_fsp = copy_fs_struct(fs);
67402@@ -1828,7 +1876,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
67403 fs = current->fs;
67404 spin_lock(&fs->lock);
67405 current->fs = new_fs;
67406- if (--fs->users)
67407+ gr_set_chroot_entries(current, &current->fs->root);
67408+ if (atomic_dec_return(&fs->users))
67409 new_fs = NULL;
67410 else
67411 new_fs = fs;
67412diff --git a/kernel/futex.c b/kernel/futex.c
67413index 3717e7b..473c750 100644
67414--- a/kernel/futex.c
67415+++ b/kernel/futex.c
67416@@ -54,6 +54,7 @@
67417 #include <linux/mount.h>
67418 #include <linux/pagemap.h>
67419 #include <linux/syscalls.h>
67420+#include <linux/ptrace.h>
67421 #include <linux/signal.h>
67422 #include <linux/export.h>
67423 #include <linux/magic.h>
67424@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
67425 struct page *page, *page_head;
67426 int err, ro = 0;
67427
67428+#ifdef CONFIG_PAX_SEGMEXEC
67429+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
67430+ return -EFAULT;
67431+#endif
67432+
67433 /*
67434 * The futex address must be "naturally" aligned.
67435 */
67436@@ -2714,6 +2720,7 @@ static int __init futex_init(void)
67437 {
67438 u32 curval;
67439 int i;
67440+ mm_segment_t oldfs;
67441
67442 /*
67443 * This will fail and we want it. Some arch implementations do
67444@@ -2725,8 +2732,11 @@ static int __init futex_init(void)
67445 * implementation, the non-functional ones will return
67446 * -ENOSYS.
67447 */
67448+ oldfs = get_fs();
67449+ set_fs(USER_DS);
67450 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
67451 futex_cmpxchg_enabled = 1;
67452+ set_fs(oldfs);
67453
67454 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
67455 plist_head_init(&futex_queues[i].chain);
67456diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
67457index 9b22d03..6295b62 100644
67458--- a/kernel/gcov/base.c
67459+++ b/kernel/gcov/base.c
67460@@ -102,11 +102,6 @@ void gcov_enable_events(void)
67461 }
67462
67463 #ifdef CONFIG_MODULES
67464-static inline int within(void *addr, void *start, unsigned long size)
67465-{
67466- return ((addr >= start) && (addr < start + size));
67467-}
67468-
67469 /* Update list and generate events when modules are unloaded. */
67470 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
67471 void *data)
67472@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
67473 prev = NULL;
67474 /* Remove entries located in module from linked list. */
67475 for (info = gcov_info_head; info; info = info->next) {
67476- if (within(info, mod->module_core, mod->core_size)) {
67477+ if (within_module_core_rw((unsigned long)info, mod)) {
67478 if (prev)
67479 prev->next = info->next;
67480 else
67481diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
67482index 6db7a5e..25b6648 100644
67483--- a/kernel/hrtimer.c
67484+++ b/kernel/hrtimer.c
67485@@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
67486 local_irq_restore(flags);
67487 }
67488
67489-static void run_hrtimer_softirq(struct softirq_action *h)
67490+static void run_hrtimer_softirq(void)
67491 {
67492 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
67493
67494diff --git a/kernel/jump_label.c b/kernel/jump_label.c
67495index 4304919..408c4c0 100644
67496--- a/kernel/jump_label.c
67497+++ b/kernel/jump_label.c
67498@@ -13,6 +13,7 @@
67499 #include <linux/sort.h>
67500 #include <linux/err.h>
67501 #include <linux/static_key.h>
67502+#include <linux/mm.h>
67503
67504 #ifdef HAVE_JUMP_LABEL
67505
67506@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
67507
67508 size = (((unsigned long)stop - (unsigned long)start)
67509 / sizeof(struct jump_entry));
67510+ pax_open_kernel();
67511 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
67512+ pax_close_kernel();
67513 }
67514
67515 static void jump_label_update(struct static_key *key, int enable);
67516@@ -356,10 +359,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
67517 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
67518 struct jump_entry *iter;
67519
67520+ pax_open_kernel();
67521 for (iter = iter_start; iter < iter_stop; iter++) {
67522 if (within_module_init(iter->code, mod))
67523 iter->code = 0;
67524 }
67525+ pax_close_kernel();
67526 }
67527
67528 static int
67529diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
67530index 2169fee..45c017a 100644
67531--- a/kernel/kallsyms.c
67532+++ b/kernel/kallsyms.c
67533@@ -11,6 +11,9 @@
67534 * Changed the compression method from stem compression to "table lookup"
67535 * compression (see scripts/kallsyms.c for a more complete description)
67536 */
67537+#ifdef CONFIG_GRKERNSEC_HIDESYM
67538+#define __INCLUDED_BY_HIDESYM 1
67539+#endif
67540 #include <linux/kallsyms.h>
67541 #include <linux/module.h>
67542 #include <linux/init.h>
67543@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
67544
67545 static inline int is_kernel_inittext(unsigned long addr)
67546 {
67547+ if (system_state != SYSTEM_BOOTING)
67548+ return 0;
67549+
67550 if (addr >= (unsigned long)_sinittext
67551 && addr <= (unsigned long)_einittext)
67552 return 1;
67553 return 0;
67554 }
67555
67556+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67557+#ifdef CONFIG_MODULES
67558+static inline int is_module_text(unsigned long addr)
67559+{
67560+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
67561+ return 1;
67562+
67563+ addr = ktla_ktva(addr);
67564+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
67565+}
67566+#else
67567+static inline int is_module_text(unsigned long addr)
67568+{
67569+ return 0;
67570+}
67571+#endif
67572+#endif
67573+
67574 static inline int is_kernel_text(unsigned long addr)
67575 {
67576 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
67577@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
67578
67579 static inline int is_kernel(unsigned long addr)
67580 {
67581+
67582+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67583+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
67584+ return 1;
67585+
67586+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
67587+#else
67588 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
67589+#endif
67590+
67591 return 1;
67592 return in_gate_area_no_mm(addr);
67593 }
67594
67595 static int is_ksym_addr(unsigned long addr)
67596 {
67597+
67598+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67599+ if (is_module_text(addr))
67600+ return 0;
67601+#endif
67602+
67603 if (all_var)
67604 return is_kernel(addr);
67605
67606@@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
67607
67608 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
67609 {
67610- iter->name[0] = '\0';
67611 iter->nameoff = get_symbol_offset(new_pos);
67612 iter->pos = new_pos;
67613 }
67614@@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
67615 {
67616 struct kallsym_iter *iter = m->private;
67617
67618+#ifdef CONFIG_GRKERNSEC_HIDESYM
67619+ if (current_uid())
67620+ return 0;
67621+#endif
67622+
67623 /* Some debugging symbols have no name. Ignore them. */
67624 if (!iter->name[0])
67625 return 0;
67626@@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
67627 */
67628 type = iter->exported ? toupper(iter->type) :
67629 tolower(iter->type);
67630+
67631 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
67632 type, iter->name, iter->module_name);
67633 } else
67634@@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
67635 struct kallsym_iter *iter;
67636 int ret;
67637
67638- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
67639+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
67640 if (!iter)
67641 return -ENOMEM;
67642 reset_iter(iter, 0);
67643diff --git a/kernel/kexec.c b/kernel/kexec.c
67644index 4e2e472..cd0c7ae 100644
67645--- a/kernel/kexec.c
67646+++ b/kernel/kexec.c
67647@@ -1046,7 +1046,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
67648 unsigned long flags)
67649 {
67650 struct compat_kexec_segment in;
67651- struct kexec_segment out, __user *ksegments;
67652+ struct kexec_segment out;
67653+ struct kexec_segment __user *ksegments;
67654 unsigned long i, result;
67655
67656 /* Don't allow clients that don't understand the native
67657diff --git a/kernel/kmod.c b/kernel/kmod.c
67658index ff2c7cb..085d7af 100644
67659--- a/kernel/kmod.c
67660+++ b/kernel/kmod.c
67661@@ -66,7 +66,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
67662 kfree(info->argv);
67663 }
67664
67665-static int call_modprobe(char *module_name, int wait)
67666+static int call_modprobe(char *module_name, char *module_param, int wait)
67667 {
67668 static char *envp[] = {
67669 "HOME=/",
67670@@ -75,7 +75,7 @@ static int call_modprobe(char *module_name, int wait)
67671 NULL
67672 };
67673
67674- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
67675+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
67676 if (!argv)
67677 goto out;
67678
67679@@ -87,7 +87,8 @@ static int call_modprobe(char *module_name, int wait)
67680 argv[1] = "-q";
67681 argv[2] = "--";
67682 argv[3] = module_name; /* check free_modprobe_argv() */
67683- argv[4] = NULL;
67684+ argv[4] = module_param;
67685+ argv[5] = NULL;
67686
67687 return call_usermodehelper_fns(modprobe_path, argv, envp,
67688 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
67689@@ -112,9 +113,8 @@ out:
67690 * If module auto-loading support is disabled then this function
67691 * becomes a no-operation.
67692 */
67693-int __request_module(bool wait, const char *fmt, ...)
67694+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
67695 {
67696- va_list args;
67697 char module_name[MODULE_NAME_LEN];
67698 unsigned int max_modprobes;
67699 int ret;
67700@@ -122,9 +122,7 @@ int __request_module(bool wait, const char *fmt, ...)
67701 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
67702 static int kmod_loop_msg;
67703
67704- va_start(args, fmt);
67705- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
67706- va_end(args);
67707+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
67708 if (ret >= MODULE_NAME_LEN)
67709 return -ENAMETOOLONG;
67710
67711@@ -132,6 +130,20 @@ int __request_module(bool wait, const char *fmt, ...)
67712 if (ret)
67713 return ret;
67714
67715+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67716+ if (!current_uid()) {
67717+ /* hack to workaround consolekit/udisks stupidity */
67718+ read_lock(&tasklist_lock);
67719+ if (!strcmp(current->comm, "mount") &&
67720+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
67721+ read_unlock(&tasklist_lock);
67722+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
67723+ return -EPERM;
67724+ }
67725+ read_unlock(&tasklist_lock);
67726+ }
67727+#endif
67728+
67729 /* If modprobe needs a service that is in a module, we get a recursive
67730 * loop. Limit the number of running kmod threads to max_threads/2 or
67731 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
67732@@ -160,11 +172,52 @@ int __request_module(bool wait, const char *fmt, ...)
67733
67734 trace_module_request(module_name, wait, _RET_IP_);
67735
67736- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
67737+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
67738
67739 atomic_dec(&kmod_concurrent);
67740 return ret;
67741 }
67742+
67743+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
67744+{
67745+ va_list args;
67746+ int ret;
67747+
67748+ va_start(args, fmt);
67749+ ret = ____request_module(wait, module_param, fmt, args);
67750+ va_end(args);
67751+
67752+ return ret;
67753+}
67754+
67755+int __request_module(bool wait, const char *fmt, ...)
67756+{
67757+ va_list args;
67758+ int ret;
67759+
67760+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67761+ if (current_uid()) {
67762+ char module_param[MODULE_NAME_LEN];
67763+
67764+ memset(module_param, 0, sizeof(module_param));
67765+
67766+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
67767+
67768+ va_start(args, fmt);
67769+ ret = ____request_module(wait, module_param, fmt, args);
67770+ va_end(args);
67771+
67772+ return ret;
67773+ }
67774+#endif
67775+
67776+ va_start(args, fmt);
67777+ ret = ____request_module(wait, NULL, fmt, args);
67778+ va_end(args);
67779+
67780+ return ret;
67781+}
67782+
67783 EXPORT_SYMBOL(__request_module);
67784 #endif /* CONFIG_MODULES */
67785
67786@@ -266,7 +319,7 @@ static int wait_for_helper(void *data)
67787 *
67788 * Thus the __user pointer cast is valid here.
67789 */
67790- sys_wait4(pid, (int __user *)&ret, 0, NULL);
67791+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
67792
67793 /*
67794 * If ret is 0, either ____call_usermodehelper failed and the
67795diff --git a/kernel/kprobes.c b/kernel/kprobes.c
67796index c62b854..cb67968 100644
67797--- a/kernel/kprobes.c
67798+++ b/kernel/kprobes.c
67799@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
67800 * kernel image and loaded module images reside. This is required
67801 * so x86_64 can correctly handle the %rip-relative fixups.
67802 */
67803- kip->insns = module_alloc(PAGE_SIZE);
67804+ kip->insns = module_alloc_exec(PAGE_SIZE);
67805 if (!kip->insns) {
67806 kfree(kip);
67807 return NULL;
67808@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
67809 */
67810 if (!list_is_singular(&kip->list)) {
67811 list_del(&kip->list);
67812- module_free(NULL, kip->insns);
67813+ module_free_exec(NULL, kip->insns);
67814 kfree(kip);
67815 }
67816 return 1;
67817@@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
67818 {
67819 int i, err = 0;
67820 unsigned long offset = 0, size = 0;
67821- char *modname, namebuf[128];
67822+ char *modname, namebuf[KSYM_NAME_LEN];
67823 const char *symbol_name;
67824 void *addr;
67825 struct kprobe_blackpoint *kb;
67826@@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
67827 const char *sym = NULL;
67828 unsigned int i = *(loff_t *) v;
67829 unsigned long offset = 0;
67830- char *modname, namebuf[128];
67831+ char *modname, namebuf[KSYM_NAME_LEN];
67832
67833 head = &kprobe_table[i];
67834 preempt_disable();
67835diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
67836index 4e316e1..5501eef 100644
67837--- a/kernel/ksysfs.c
67838+++ b/kernel/ksysfs.c
67839@@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
67840 {
67841 if (count+1 > UEVENT_HELPER_PATH_LEN)
67842 return -ENOENT;
67843+ if (!capable(CAP_SYS_ADMIN))
67844+ return -EPERM;
67845 memcpy(uevent_helper, buf, count);
67846 uevent_helper[count] = '\0';
67847 if (count && uevent_helper[count-1] == '\n')
67848diff --git a/kernel/lockdep.c b/kernel/lockdep.c
67849index ea9ee45..67ebc8f 100644
67850--- a/kernel/lockdep.c
67851+++ b/kernel/lockdep.c
67852@@ -590,6 +590,10 @@ static int static_obj(void *obj)
67853 end = (unsigned long) &_end,
67854 addr = (unsigned long) obj;
67855
67856+#ifdef CONFIG_PAX_KERNEXEC
67857+ start = ktla_ktva(start);
67858+#endif
67859+
67860 /*
67861 * static variable?
67862 */
67863@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
67864 if (!static_obj(lock->key)) {
67865 debug_locks_off();
67866 printk("INFO: trying to register non-static key.\n");
67867+ printk("lock:%pS key:%pS.\n", lock, lock->key);
67868 printk("the code is fine but needs lockdep annotation.\n");
67869 printk("turning off the locking correctness validator.\n");
67870 dump_stack();
67871@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
67872 if (!class)
67873 return 0;
67874 }
67875- atomic_inc((atomic_t *)&class->ops);
67876+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
67877 if (very_verbose(class)) {
67878 printk("\nacquire class [%p] %s", class->key, class->name);
67879 if (class->name_version > 1)
67880diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
67881index 91c32a0..b2c71c5 100644
67882--- a/kernel/lockdep_proc.c
67883+++ b/kernel/lockdep_proc.c
67884@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
67885
67886 static void print_name(struct seq_file *m, struct lock_class *class)
67887 {
67888- char str[128];
67889+ char str[KSYM_NAME_LEN];
67890 const char *name = class->name;
67891
67892 if (!name) {
67893diff --git a/kernel/module.c b/kernel/module.c
67894index 4edbd9c..165e780 100644
67895--- a/kernel/module.c
67896+++ b/kernel/module.c
67897@@ -58,6 +58,7 @@
67898 #include <linux/jump_label.h>
67899 #include <linux/pfn.h>
67900 #include <linux/bsearch.h>
67901+#include <linux/grsecurity.h>
67902
67903 #define CREATE_TRACE_POINTS
67904 #include <trace/events/module.h>
67905@@ -114,7 +115,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
67906
67907 /* Bounds of module allocation, for speeding __module_address.
67908 * Protected by module_mutex. */
67909-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
67910+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
67911+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
67912
67913 int register_module_notifier(struct notifier_block * nb)
67914 {
67915@@ -278,7 +280,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
67916 return true;
67917
67918 list_for_each_entry_rcu(mod, &modules, list) {
67919- struct symsearch arr[] = {
67920+ struct symsearch modarr[] = {
67921 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
67922 NOT_GPL_ONLY, false },
67923 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
67924@@ -300,7 +302,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
67925 #endif
67926 };
67927
67928- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
67929+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
67930 return true;
67931 }
67932 return false;
67933@@ -432,7 +434,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
67934 static int percpu_modalloc(struct module *mod,
67935 unsigned long size, unsigned long align)
67936 {
67937- if (align > PAGE_SIZE) {
67938+ if (align-1 >= PAGE_SIZE) {
67939 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
67940 mod->name, align, PAGE_SIZE);
67941 align = PAGE_SIZE;
67942@@ -1032,7 +1034,7 @@ struct module_attribute module_uevent =
67943 static ssize_t show_coresize(struct module_attribute *mattr,
67944 struct module_kobject *mk, char *buffer)
67945 {
67946- return sprintf(buffer, "%u\n", mk->mod->core_size);
67947+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
67948 }
67949
67950 static struct module_attribute modinfo_coresize =
67951@@ -1041,7 +1043,7 @@ static struct module_attribute modinfo_coresize =
67952 static ssize_t show_initsize(struct module_attribute *mattr,
67953 struct module_kobject *mk, char *buffer)
67954 {
67955- return sprintf(buffer, "%u\n", mk->mod->init_size);
67956+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
67957 }
67958
67959 static struct module_attribute modinfo_initsize =
67960@@ -1255,7 +1257,7 @@ resolve_symbol_wait(struct module *mod,
67961 */
67962 #ifdef CONFIG_SYSFS
67963
67964-#ifdef CONFIG_KALLSYMS
67965+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67966 static inline bool sect_empty(const Elf_Shdr *sect)
67967 {
67968 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
67969@@ -1721,21 +1723,21 @@ static void set_section_ro_nx(void *base,
67970
67971 static void unset_module_core_ro_nx(struct module *mod)
67972 {
67973- set_page_attributes(mod->module_core + mod->core_text_size,
67974- mod->module_core + mod->core_size,
67975+ set_page_attributes(mod->module_core_rw,
67976+ mod->module_core_rw + mod->core_size_rw,
67977 set_memory_x);
67978- set_page_attributes(mod->module_core,
67979- mod->module_core + mod->core_ro_size,
67980+ set_page_attributes(mod->module_core_rx,
67981+ mod->module_core_rx + mod->core_size_rx,
67982 set_memory_rw);
67983 }
67984
67985 static void unset_module_init_ro_nx(struct module *mod)
67986 {
67987- set_page_attributes(mod->module_init + mod->init_text_size,
67988- mod->module_init + mod->init_size,
67989+ set_page_attributes(mod->module_init_rw,
67990+ mod->module_init_rw + mod->init_size_rw,
67991 set_memory_x);
67992- set_page_attributes(mod->module_init,
67993- mod->module_init + mod->init_ro_size,
67994+ set_page_attributes(mod->module_init_rx,
67995+ mod->module_init_rx + mod->init_size_rx,
67996 set_memory_rw);
67997 }
67998
67999@@ -1746,14 +1748,14 @@ void set_all_modules_text_rw(void)
68000
68001 mutex_lock(&module_mutex);
68002 list_for_each_entry_rcu(mod, &modules, list) {
68003- if ((mod->module_core) && (mod->core_text_size)) {
68004- set_page_attributes(mod->module_core,
68005- mod->module_core + mod->core_text_size,
68006+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
68007+ set_page_attributes(mod->module_core_rx,
68008+ mod->module_core_rx + mod->core_size_rx,
68009 set_memory_rw);
68010 }
68011- if ((mod->module_init) && (mod->init_text_size)) {
68012- set_page_attributes(mod->module_init,
68013- mod->module_init + mod->init_text_size,
68014+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
68015+ set_page_attributes(mod->module_init_rx,
68016+ mod->module_init_rx + mod->init_size_rx,
68017 set_memory_rw);
68018 }
68019 }
68020@@ -1767,14 +1769,14 @@ void set_all_modules_text_ro(void)
68021
68022 mutex_lock(&module_mutex);
68023 list_for_each_entry_rcu(mod, &modules, list) {
68024- if ((mod->module_core) && (mod->core_text_size)) {
68025- set_page_attributes(mod->module_core,
68026- mod->module_core + mod->core_text_size,
68027+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
68028+ set_page_attributes(mod->module_core_rx,
68029+ mod->module_core_rx + mod->core_size_rx,
68030 set_memory_ro);
68031 }
68032- if ((mod->module_init) && (mod->init_text_size)) {
68033- set_page_attributes(mod->module_init,
68034- mod->module_init + mod->init_text_size,
68035+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
68036+ set_page_attributes(mod->module_init_rx,
68037+ mod->module_init_rx + mod->init_size_rx,
68038 set_memory_ro);
68039 }
68040 }
68041@@ -1820,16 +1822,19 @@ static void free_module(struct module *mod)
68042
68043 /* This may be NULL, but that's OK */
68044 unset_module_init_ro_nx(mod);
68045- module_free(mod, mod->module_init);
68046+ module_free(mod, mod->module_init_rw);
68047+ module_free_exec(mod, mod->module_init_rx);
68048 kfree(mod->args);
68049 percpu_modfree(mod);
68050
68051 /* Free lock-classes: */
68052- lockdep_free_key_range(mod->module_core, mod->core_size);
68053+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
68054+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
68055
68056 /* Finally, free the core (containing the module structure) */
68057 unset_module_core_ro_nx(mod);
68058- module_free(mod, mod->module_core);
68059+ module_free_exec(mod, mod->module_core_rx);
68060+ module_free(mod, mod->module_core_rw);
68061
68062 #ifdef CONFIG_MPU
68063 update_protections(current->mm);
68064@@ -1899,9 +1904,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
68065 int ret = 0;
68066 const struct kernel_symbol *ksym;
68067
68068+#ifdef CONFIG_GRKERNSEC_MODHARDEN
68069+ int is_fs_load = 0;
68070+ int register_filesystem_found = 0;
68071+ char *p;
68072+
68073+ p = strstr(mod->args, "grsec_modharden_fs");
68074+ if (p) {
68075+ char *endptr = p + strlen("grsec_modharden_fs");
68076+ /* copy \0 as well */
68077+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
68078+ is_fs_load = 1;
68079+ }
68080+#endif
68081+
68082 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
68083 const char *name = info->strtab + sym[i].st_name;
68084
68085+#ifdef CONFIG_GRKERNSEC_MODHARDEN
68086+ /* it's a real shame this will never get ripped and copied
68087+ upstream! ;(
68088+ */
68089+ if (is_fs_load && !strcmp(name, "register_filesystem"))
68090+ register_filesystem_found = 1;
68091+#endif
68092+
68093 switch (sym[i].st_shndx) {
68094 case SHN_COMMON:
68095 /* We compiled with -fno-common. These are not
68096@@ -1922,7 +1949,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
68097 ksym = resolve_symbol_wait(mod, info, name);
68098 /* Ok if resolved. */
68099 if (ksym && !IS_ERR(ksym)) {
68100+ pax_open_kernel();
68101 sym[i].st_value = ksym->value;
68102+ pax_close_kernel();
68103 break;
68104 }
68105
68106@@ -1941,11 +1970,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
68107 secbase = (unsigned long)mod_percpu(mod);
68108 else
68109 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
68110+ pax_open_kernel();
68111 sym[i].st_value += secbase;
68112+ pax_close_kernel();
68113 break;
68114 }
68115 }
68116
68117+#ifdef CONFIG_GRKERNSEC_MODHARDEN
68118+ if (is_fs_load && !register_filesystem_found) {
68119+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
68120+ ret = -EPERM;
68121+ }
68122+#endif
68123+
68124 return ret;
68125 }
68126
68127@@ -2049,22 +2087,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
68128 || s->sh_entsize != ~0UL
68129 || strstarts(sname, ".init"))
68130 continue;
68131- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
68132+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
68133+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
68134+ else
68135+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
68136 pr_debug("\t%s\n", sname);
68137 }
68138- switch (m) {
68139- case 0: /* executable */
68140- mod->core_size = debug_align(mod->core_size);
68141- mod->core_text_size = mod->core_size;
68142- break;
68143- case 1: /* RO: text and ro-data */
68144- mod->core_size = debug_align(mod->core_size);
68145- mod->core_ro_size = mod->core_size;
68146- break;
68147- case 3: /* whole core */
68148- mod->core_size = debug_align(mod->core_size);
68149- break;
68150- }
68151 }
68152
68153 pr_debug("Init section allocation order:\n");
68154@@ -2078,23 +2106,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
68155 || s->sh_entsize != ~0UL
68156 || !strstarts(sname, ".init"))
68157 continue;
68158- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
68159- | INIT_OFFSET_MASK);
68160+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
68161+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
68162+ else
68163+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
68164+ s->sh_entsize |= INIT_OFFSET_MASK;
68165 pr_debug("\t%s\n", sname);
68166 }
68167- switch (m) {
68168- case 0: /* executable */
68169- mod->init_size = debug_align(mod->init_size);
68170- mod->init_text_size = mod->init_size;
68171- break;
68172- case 1: /* RO: text and ro-data */
68173- mod->init_size = debug_align(mod->init_size);
68174- mod->init_ro_size = mod->init_size;
68175- break;
68176- case 3: /* whole init */
68177- mod->init_size = debug_align(mod->init_size);
68178- break;
68179- }
68180 }
68181 }
68182
68183@@ -2266,7 +2284,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
68184
68185 /* Put symbol section at end of init part of module. */
68186 symsect->sh_flags |= SHF_ALLOC;
68187- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
68188+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
68189 info->index.sym) | INIT_OFFSET_MASK;
68190 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
68191
68192@@ -2281,13 +2299,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
68193 }
68194
68195 /* Append room for core symbols at end of core part. */
68196- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
68197- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
68198- mod->core_size += strtab_size;
68199+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
68200+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
68201+ mod->core_size_rx += strtab_size;
68202
68203 /* Put string table section at end of init part of module. */
68204 strsect->sh_flags |= SHF_ALLOC;
68205- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
68206+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
68207 info->index.str) | INIT_OFFSET_MASK;
68208 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
68209 }
68210@@ -2305,12 +2323,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
68211 /* Make sure we get permanent strtab: don't use info->strtab. */
68212 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
68213
68214+ pax_open_kernel();
68215+
68216 /* Set types up while we still have access to sections. */
68217 for (i = 0; i < mod->num_symtab; i++)
68218 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
68219
68220- mod->core_symtab = dst = mod->module_core + info->symoffs;
68221- mod->core_strtab = s = mod->module_core + info->stroffs;
68222+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
68223+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
68224 src = mod->symtab;
68225 *dst = *src;
68226 *s++ = 0;
68227@@ -2323,6 +2343,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
68228 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
68229 }
68230 mod->core_num_syms = ndst;
68231+
68232+ pax_close_kernel();
68233 }
68234 #else
68235 static inline void layout_symtab(struct module *mod, struct load_info *info)
68236@@ -2356,17 +2378,33 @@ void * __weak module_alloc(unsigned long size)
68237 return size == 0 ? NULL : vmalloc_exec(size);
68238 }
68239
68240-static void *module_alloc_update_bounds(unsigned long size)
68241+static void *module_alloc_update_bounds_rw(unsigned long size)
68242 {
68243 void *ret = module_alloc(size);
68244
68245 if (ret) {
68246 mutex_lock(&module_mutex);
68247 /* Update module bounds. */
68248- if ((unsigned long)ret < module_addr_min)
68249- module_addr_min = (unsigned long)ret;
68250- if ((unsigned long)ret + size > module_addr_max)
68251- module_addr_max = (unsigned long)ret + size;
68252+ if ((unsigned long)ret < module_addr_min_rw)
68253+ module_addr_min_rw = (unsigned long)ret;
68254+ if ((unsigned long)ret + size > module_addr_max_rw)
68255+ module_addr_max_rw = (unsigned long)ret + size;
68256+ mutex_unlock(&module_mutex);
68257+ }
68258+ return ret;
68259+}
68260+
68261+static void *module_alloc_update_bounds_rx(unsigned long size)
68262+{
68263+ void *ret = module_alloc_exec(size);
68264+
68265+ if (ret) {
68266+ mutex_lock(&module_mutex);
68267+ /* Update module bounds. */
68268+ if ((unsigned long)ret < module_addr_min_rx)
68269+ module_addr_min_rx = (unsigned long)ret;
68270+ if ((unsigned long)ret + size > module_addr_max_rx)
68271+ module_addr_max_rx = (unsigned long)ret + size;
68272 mutex_unlock(&module_mutex);
68273 }
68274 return ret;
68275@@ -2544,8 +2582,14 @@ static struct module *setup_load_info(struct load_info *info)
68276 static int check_modinfo(struct module *mod, struct load_info *info)
68277 {
68278 const char *modmagic = get_modinfo(info, "vermagic");
68279+ const char *license = get_modinfo(info, "license");
68280 int err;
68281
68282+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
68283+ if (!license || !license_is_gpl_compatible(license))
68284+ return -ENOEXEC;
68285+#endif
68286+
68287 /* This is allowed: modprobe --force will invalidate it. */
68288 if (!modmagic) {
68289 err = try_to_force_load(mod, "bad vermagic");
68290@@ -2568,7 +2612,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
68291 }
68292
68293 /* Set up license info based on the info section */
68294- set_license(mod, get_modinfo(info, "license"));
68295+ set_license(mod, license);
68296
68297 return 0;
68298 }
68299@@ -2662,7 +2706,7 @@ static int move_module(struct module *mod, struct load_info *info)
68300 void *ptr;
68301
68302 /* Do the allocs. */
68303- ptr = module_alloc_update_bounds(mod->core_size);
68304+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
68305 /*
68306 * The pointer to this block is stored in the module structure
68307 * which is inside the block. Just mark it as not being a
68308@@ -2672,23 +2716,50 @@ static int move_module(struct module *mod, struct load_info *info)
68309 if (!ptr)
68310 return -ENOMEM;
68311
68312- memset(ptr, 0, mod->core_size);
68313- mod->module_core = ptr;
68314+ memset(ptr, 0, mod->core_size_rw);
68315+ mod->module_core_rw = ptr;
68316
68317- ptr = module_alloc_update_bounds(mod->init_size);
68318+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
68319 /*
68320 * The pointer to this block is stored in the module structure
68321 * which is inside the block. This block doesn't need to be
68322 * scanned as it contains data and code that will be freed
68323 * after the module is initialized.
68324 */
68325- kmemleak_ignore(ptr);
68326- if (!ptr && mod->init_size) {
68327- module_free(mod, mod->module_core);
68328+ kmemleak_not_leak(ptr);
68329+ if (!ptr && mod->init_size_rw) {
68330+ module_free(mod, mod->module_core_rw);
68331 return -ENOMEM;
68332 }
68333- memset(ptr, 0, mod->init_size);
68334- mod->module_init = ptr;
68335+ memset(ptr, 0, mod->init_size_rw);
68336+ mod->module_init_rw = ptr;
68337+
68338+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
68339+ kmemleak_not_leak(ptr);
68340+ if (!ptr) {
68341+ module_free(mod, mod->module_init_rw);
68342+ module_free(mod, mod->module_core_rw);
68343+ return -ENOMEM;
68344+ }
68345+
68346+ pax_open_kernel();
68347+ memset(ptr, 0, mod->core_size_rx);
68348+ pax_close_kernel();
68349+ mod->module_core_rx = ptr;
68350+
68351+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
68352+ kmemleak_not_leak(ptr);
68353+ if (!ptr && mod->init_size_rx) {
68354+ module_free_exec(mod, mod->module_core_rx);
68355+ module_free(mod, mod->module_init_rw);
68356+ module_free(mod, mod->module_core_rw);
68357+ return -ENOMEM;
68358+ }
68359+
68360+ pax_open_kernel();
68361+ memset(ptr, 0, mod->init_size_rx);
68362+ pax_close_kernel();
68363+ mod->module_init_rx = ptr;
68364
68365 /* Transfer each section which specifies SHF_ALLOC */
68366 pr_debug("final section addresses:\n");
68367@@ -2699,16 +2770,45 @@ static int move_module(struct module *mod, struct load_info *info)
68368 if (!(shdr->sh_flags & SHF_ALLOC))
68369 continue;
68370
68371- if (shdr->sh_entsize & INIT_OFFSET_MASK)
68372- dest = mod->module_init
68373- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
68374- else
68375- dest = mod->module_core + shdr->sh_entsize;
68376+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
68377+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
68378+ dest = mod->module_init_rw
68379+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
68380+ else
68381+ dest = mod->module_init_rx
68382+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
68383+ } else {
68384+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
68385+ dest = mod->module_core_rw + shdr->sh_entsize;
68386+ else
68387+ dest = mod->module_core_rx + shdr->sh_entsize;
68388+ }
68389+
68390+ if (shdr->sh_type != SHT_NOBITS) {
68391+
68392+#ifdef CONFIG_PAX_KERNEXEC
68393+#ifdef CONFIG_X86_64
68394+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
68395+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
68396+#endif
68397+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
68398+ pax_open_kernel();
68399+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
68400+ pax_close_kernel();
68401+ } else
68402+#endif
68403
68404- if (shdr->sh_type != SHT_NOBITS)
68405 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
68406+ }
68407 /* Update sh_addr to point to copy in image. */
68408- shdr->sh_addr = (unsigned long)dest;
68409+
68410+#ifdef CONFIG_PAX_KERNEXEC
68411+ if (shdr->sh_flags & SHF_EXECINSTR)
68412+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
68413+ else
68414+#endif
68415+
68416+ shdr->sh_addr = (unsigned long)dest;
68417 pr_debug("\t0x%lx %s\n",
68418 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
68419 }
68420@@ -2759,12 +2859,12 @@ static void flush_module_icache(const struct module *mod)
68421 * Do it before processing of module parameters, so the module
68422 * can provide parameter accessor functions of its own.
68423 */
68424- if (mod->module_init)
68425- flush_icache_range((unsigned long)mod->module_init,
68426- (unsigned long)mod->module_init
68427- + mod->init_size);
68428- flush_icache_range((unsigned long)mod->module_core,
68429- (unsigned long)mod->module_core + mod->core_size);
68430+ if (mod->module_init_rx)
68431+ flush_icache_range((unsigned long)mod->module_init_rx,
68432+ (unsigned long)mod->module_init_rx
68433+ + mod->init_size_rx);
68434+ flush_icache_range((unsigned long)mod->module_core_rx,
68435+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
68436
68437 set_fs(old_fs);
68438 }
68439@@ -2834,8 +2934,10 @@ out:
68440 static void module_deallocate(struct module *mod, struct load_info *info)
68441 {
68442 percpu_modfree(mod);
68443- module_free(mod, mod->module_init);
68444- module_free(mod, mod->module_core);
68445+ module_free_exec(mod, mod->module_init_rx);
68446+ module_free_exec(mod, mod->module_core_rx);
68447+ module_free(mod, mod->module_init_rw);
68448+ module_free(mod, mod->module_core_rw);
68449 }
68450
68451 int __weak module_finalize(const Elf_Ehdr *hdr,
68452@@ -2848,7 +2950,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
68453 static int post_relocation(struct module *mod, const struct load_info *info)
68454 {
68455 /* Sort exception table now relocations are done. */
68456+ pax_open_kernel();
68457 sort_extable(mod->extable, mod->extable + mod->num_exentries);
68458+ pax_close_kernel();
68459
68460 /* Copy relocated percpu area over. */
68461 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
68462@@ -2899,9 +3003,38 @@ static struct module *load_module(void __user *umod,
68463 if (err)
68464 goto free_unload;
68465
68466+ /* Now copy in args */
68467+ mod->args = strndup_user(uargs, ~0UL >> 1);
68468+ if (IS_ERR(mod->args)) {
68469+ err = PTR_ERR(mod->args);
68470+ goto free_unload;
68471+ }
68472+
68473 /* Set up MODINFO_ATTR fields */
68474 setup_modinfo(mod, &info);
68475
68476+#ifdef CONFIG_GRKERNSEC_MODHARDEN
68477+ {
68478+ char *p, *p2;
68479+
68480+ if (strstr(mod->args, "grsec_modharden_netdev")) {
68481+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
68482+ err = -EPERM;
68483+ goto free_modinfo;
68484+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
68485+ p += strlen("grsec_modharden_normal");
68486+ p2 = strstr(p, "_");
68487+ if (p2) {
68488+ *p2 = '\0';
68489+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
68490+ *p2 = '_';
68491+ }
68492+ err = -EPERM;
68493+ goto free_modinfo;
68494+ }
68495+ }
68496+#endif
68497+
68498 /* Fix up syms, so that st_value is a pointer to location. */
68499 err = simplify_symbols(mod, &info);
68500 if (err < 0)
68501@@ -2917,13 +3050,6 @@ static struct module *load_module(void __user *umod,
68502
68503 flush_module_icache(mod);
68504
68505- /* Now copy in args */
68506- mod->args = strndup_user(uargs, ~0UL >> 1);
68507- if (IS_ERR(mod->args)) {
68508- err = PTR_ERR(mod->args);
68509- goto free_arch_cleanup;
68510- }
68511-
68512 /* Mark state as coming so strong_try_module_get() ignores us. */
68513 mod->state = MODULE_STATE_COMING;
68514
68515@@ -2981,11 +3107,10 @@ static struct module *load_module(void __user *umod,
68516 unlock:
68517 mutex_unlock(&module_mutex);
68518 synchronize_sched();
68519- kfree(mod->args);
68520- free_arch_cleanup:
68521 module_arch_cleanup(mod);
68522 free_modinfo:
68523 free_modinfo(mod);
68524+ kfree(mod->args);
68525 free_unload:
68526 module_unload_free(mod);
68527 free_module:
68528@@ -3026,16 +3151,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
68529 MODULE_STATE_COMING, mod);
68530
68531 /* Set RO and NX regions for core */
68532- set_section_ro_nx(mod->module_core,
68533- mod->core_text_size,
68534- mod->core_ro_size,
68535- mod->core_size);
68536+ set_section_ro_nx(mod->module_core_rx,
68537+ mod->core_size_rx,
68538+ mod->core_size_rx,
68539+ mod->core_size_rx);
68540
68541 /* Set RO and NX regions for init */
68542- set_section_ro_nx(mod->module_init,
68543- mod->init_text_size,
68544- mod->init_ro_size,
68545- mod->init_size);
68546+ set_section_ro_nx(mod->module_init_rx,
68547+ mod->init_size_rx,
68548+ mod->init_size_rx,
68549+ mod->init_size_rx);
68550
68551 do_mod_ctors(mod);
68552 /* Start the module */
68553@@ -3081,11 +3206,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
68554 mod->strtab = mod->core_strtab;
68555 #endif
68556 unset_module_init_ro_nx(mod);
68557- module_free(mod, mod->module_init);
68558- mod->module_init = NULL;
68559- mod->init_size = 0;
68560- mod->init_ro_size = 0;
68561- mod->init_text_size = 0;
68562+ module_free(mod, mod->module_init_rw);
68563+ module_free_exec(mod, mod->module_init_rx);
68564+ mod->module_init_rw = NULL;
68565+ mod->module_init_rx = NULL;
68566+ mod->init_size_rw = 0;
68567+ mod->init_size_rx = 0;
68568 mutex_unlock(&module_mutex);
68569
68570 return 0;
68571@@ -3116,10 +3242,16 @@ static const char *get_ksymbol(struct module *mod,
68572 unsigned long nextval;
68573
68574 /* At worse, next value is at end of module */
68575- if (within_module_init(addr, mod))
68576- nextval = (unsigned long)mod->module_init+mod->init_text_size;
68577+ if (within_module_init_rx(addr, mod))
68578+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
68579+ else if (within_module_init_rw(addr, mod))
68580+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
68581+ else if (within_module_core_rx(addr, mod))
68582+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
68583+ else if (within_module_core_rw(addr, mod))
68584+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
68585 else
68586- nextval = (unsigned long)mod->module_core+mod->core_text_size;
68587+ return NULL;
68588
68589 /* Scan for closest preceding symbol, and next symbol. (ELF
68590 starts real symbols at 1). */
68591@@ -3354,7 +3486,7 @@ static int m_show(struct seq_file *m, void *p)
68592 char buf[8];
68593
68594 seq_printf(m, "%s %u",
68595- mod->name, mod->init_size + mod->core_size);
68596+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
68597 print_unload_info(m, mod);
68598
68599 /* Informative for users. */
68600@@ -3363,7 +3495,7 @@ static int m_show(struct seq_file *m, void *p)
68601 mod->state == MODULE_STATE_COMING ? "Loading":
68602 "Live");
68603 /* Used by oprofile and other similar tools. */
68604- seq_printf(m, " 0x%pK", mod->module_core);
68605+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
68606
68607 /* Taints info */
68608 if (mod->taints)
68609@@ -3399,7 +3531,17 @@ static const struct file_operations proc_modules_operations = {
68610
68611 static int __init proc_modules_init(void)
68612 {
68613+#ifndef CONFIG_GRKERNSEC_HIDESYM
68614+#ifdef CONFIG_GRKERNSEC_PROC_USER
68615+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68616+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68617+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
68618+#else
68619 proc_create("modules", 0, NULL, &proc_modules_operations);
68620+#endif
68621+#else
68622+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68623+#endif
68624 return 0;
68625 }
68626 module_init(proc_modules_init);
68627@@ -3458,12 +3600,12 @@ struct module *__module_address(unsigned long addr)
68628 {
68629 struct module *mod;
68630
68631- if (addr < module_addr_min || addr > module_addr_max)
68632+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
68633+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
68634 return NULL;
68635
68636 list_for_each_entry_rcu(mod, &modules, list)
68637- if (within_module_core(addr, mod)
68638- || within_module_init(addr, mod))
68639+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
68640 return mod;
68641 return NULL;
68642 }
68643@@ -3497,11 +3639,20 @@ bool is_module_text_address(unsigned long addr)
68644 */
68645 struct module *__module_text_address(unsigned long addr)
68646 {
68647- struct module *mod = __module_address(addr);
68648+ struct module *mod;
68649+
68650+#ifdef CONFIG_X86_32
68651+ addr = ktla_ktva(addr);
68652+#endif
68653+
68654+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
68655+ return NULL;
68656+
68657+ mod = __module_address(addr);
68658+
68659 if (mod) {
68660 /* Make sure it's within the text section. */
68661- if (!within(addr, mod->module_init, mod->init_text_size)
68662- && !within(addr, mod->module_core, mod->core_text_size))
68663+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
68664 mod = NULL;
68665 }
68666 return mod;
68667diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
68668index 7e3443f..b2a1e6b 100644
68669--- a/kernel/mutex-debug.c
68670+++ b/kernel/mutex-debug.c
68671@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
68672 }
68673
68674 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68675- struct thread_info *ti)
68676+ struct task_struct *task)
68677 {
68678 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
68679
68680 /* Mark the current thread as blocked on the lock: */
68681- ti->task->blocked_on = waiter;
68682+ task->blocked_on = waiter;
68683 }
68684
68685 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68686- struct thread_info *ti)
68687+ struct task_struct *task)
68688 {
68689 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
68690- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
68691- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
68692- ti->task->blocked_on = NULL;
68693+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
68694+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
68695+ task->blocked_on = NULL;
68696
68697 list_del_init(&waiter->list);
68698 waiter->task = NULL;
68699diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
68700index 0799fd3..d06ae3b 100644
68701--- a/kernel/mutex-debug.h
68702+++ b/kernel/mutex-debug.h
68703@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
68704 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
68705 extern void debug_mutex_add_waiter(struct mutex *lock,
68706 struct mutex_waiter *waiter,
68707- struct thread_info *ti);
68708+ struct task_struct *task);
68709 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68710- struct thread_info *ti);
68711+ struct task_struct *task);
68712 extern void debug_mutex_unlock(struct mutex *lock);
68713 extern void debug_mutex_init(struct mutex *lock, const char *name,
68714 struct lock_class_key *key);
68715diff --git a/kernel/mutex.c b/kernel/mutex.c
68716index a307cc9..27fd2e9 100644
68717--- a/kernel/mutex.c
68718+++ b/kernel/mutex.c
68719@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68720 spin_lock_mutex(&lock->wait_lock, flags);
68721
68722 debug_mutex_lock_common(lock, &waiter);
68723- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
68724+ debug_mutex_add_waiter(lock, &waiter, task);
68725
68726 /* add waiting tasks to the end of the waitqueue (FIFO): */
68727 list_add_tail(&waiter.list, &lock->wait_list);
68728@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68729 * TASK_UNINTERRUPTIBLE case.)
68730 */
68731 if (unlikely(signal_pending_state(state, task))) {
68732- mutex_remove_waiter(lock, &waiter,
68733- task_thread_info(task));
68734+ mutex_remove_waiter(lock, &waiter, task);
68735 mutex_release(&lock->dep_map, 1, ip);
68736 spin_unlock_mutex(&lock->wait_lock, flags);
68737
68738@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68739 done:
68740 lock_acquired(&lock->dep_map, ip);
68741 /* got the lock - rejoice! */
68742- mutex_remove_waiter(lock, &waiter, current_thread_info());
68743+ mutex_remove_waiter(lock, &waiter, task);
68744 mutex_set_owner(lock);
68745
68746 /* set it to 0 if there are no waiters left: */
68747diff --git a/kernel/panic.c b/kernel/panic.c
68748index d2a5f4e..5edc1d9 100644
68749--- a/kernel/panic.c
68750+++ b/kernel/panic.c
68751@@ -75,6 +75,14 @@ void panic(const char *fmt, ...)
68752 int state = 0;
68753
68754 /*
68755+ * Disable local interrupts. This will prevent panic_smp_self_stop
68756+ * from deadlocking the first cpu that invokes the panic, since
68757+ * there is nothing to prevent an interrupt handler (that runs
68758+ * after the panic_lock is acquired) from invoking panic again.
68759+ */
68760+ local_irq_disable();
68761+
68762+ /*
68763 * It's possible to come here directly from a panic-assertion and
68764 * not have preempt disabled. Some functions called from here want
68765 * preempt to be disabled. No point enabling it later though...
68766@@ -402,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
68767 const char *board;
68768
68769 printk(KERN_WARNING "------------[ cut here ]------------\n");
68770- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
68771+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
68772 board = dmi_get_system_info(DMI_PRODUCT_NAME);
68773 if (board)
68774 printk(KERN_WARNING "Hardware name: %s\n", board);
68775@@ -457,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
68776 */
68777 void __stack_chk_fail(void)
68778 {
68779- panic("stack-protector: Kernel stack is corrupted in: %p\n",
68780+ dump_stack();
68781+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
68782 __builtin_return_address(0));
68783 }
68784 EXPORT_SYMBOL(__stack_chk_fail);
68785diff --git a/kernel/pid.c b/kernel/pid.c
68786index e86b291a..e8b0fb5 100644
68787--- a/kernel/pid.c
68788+++ b/kernel/pid.c
68789@@ -33,6 +33,7 @@
68790 #include <linux/rculist.h>
68791 #include <linux/bootmem.h>
68792 #include <linux/hash.h>
68793+#include <linux/security.h>
68794 #include <linux/pid_namespace.h>
68795 #include <linux/init_task.h>
68796 #include <linux/syscalls.h>
68797@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
68798
68799 int pid_max = PID_MAX_DEFAULT;
68800
68801-#define RESERVED_PIDS 300
68802+#define RESERVED_PIDS 500
68803
68804 int pid_max_min = RESERVED_PIDS + 1;
68805 int pid_max_max = PID_MAX_LIMIT;
68806@@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
68807 */
68808 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
68809 {
68810+ struct task_struct *task;
68811+
68812 rcu_lockdep_assert(rcu_read_lock_held(),
68813 "find_task_by_pid_ns() needs rcu_read_lock()"
68814 " protection");
68815- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
68816+
68817+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
68818+
68819+ if (gr_pid_is_chrooted(task))
68820+ return NULL;
68821+
68822+ return task;
68823 }
68824
68825 struct task_struct *find_task_by_vpid(pid_t vnr)
68826@@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
68827 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
68828 }
68829
68830+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
68831+{
68832+ rcu_lockdep_assert(rcu_read_lock_held(),
68833+ "find_task_by_pid_ns() needs rcu_read_lock()"
68834+ " protection");
68835+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
68836+}
68837+
68838 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
68839 {
68840 struct pid *pid;
68841diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
68842index 125cb67..a4d1c30 100644
68843--- a/kernel/posix-cpu-timers.c
68844+++ b/kernel/posix-cpu-timers.c
68845@@ -6,6 +6,7 @@
68846 #include <linux/posix-timers.h>
68847 #include <linux/errno.h>
68848 #include <linux/math64.h>
68849+#include <linux/security.h>
68850 #include <asm/uaccess.h>
68851 #include <linux/kernel_stat.h>
68852 #include <trace/events/timer.h>
68853@@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
68854
68855 static __init int init_posix_cpu_timers(void)
68856 {
68857- struct k_clock process = {
68858+ static struct k_clock process = {
68859 .clock_getres = process_cpu_clock_getres,
68860 .clock_get = process_cpu_clock_get,
68861 .timer_create = process_cpu_timer_create,
68862 .nsleep = process_cpu_nsleep,
68863 .nsleep_restart = process_cpu_nsleep_restart,
68864 };
68865- struct k_clock thread = {
68866+ static struct k_clock thread = {
68867 .clock_getres = thread_cpu_clock_getres,
68868 .clock_get = thread_cpu_clock_get,
68869 .timer_create = thread_cpu_timer_create,
68870diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
68871index 69185ae..cc2847a 100644
68872--- a/kernel/posix-timers.c
68873+++ b/kernel/posix-timers.c
68874@@ -43,6 +43,7 @@
68875 #include <linux/idr.h>
68876 #include <linux/posix-clock.h>
68877 #include <linux/posix-timers.h>
68878+#include <linux/grsecurity.h>
68879 #include <linux/syscalls.h>
68880 #include <linux/wait.h>
68881 #include <linux/workqueue.h>
68882@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
68883 * which we beg off on and pass to do_sys_settimeofday().
68884 */
68885
68886-static struct k_clock posix_clocks[MAX_CLOCKS];
68887+static struct k_clock *posix_clocks[MAX_CLOCKS];
68888
68889 /*
68890 * These ones are defined below.
68891@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
68892 */
68893 static __init int init_posix_timers(void)
68894 {
68895- struct k_clock clock_realtime = {
68896+ static struct k_clock clock_realtime = {
68897 .clock_getres = hrtimer_get_res,
68898 .clock_get = posix_clock_realtime_get,
68899 .clock_set = posix_clock_realtime_set,
68900@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
68901 .timer_get = common_timer_get,
68902 .timer_del = common_timer_del,
68903 };
68904- struct k_clock clock_monotonic = {
68905+ static struct k_clock clock_monotonic = {
68906 .clock_getres = hrtimer_get_res,
68907 .clock_get = posix_ktime_get_ts,
68908 .nsleep = common_nsleep,
68909@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
68910 .timer_get = common_timer_get,
68911 .timer_del = common_timer_del,
68912 };
68913- struct k_clock clock_monotonic_raw = {
68914+ static struct k_clock clock_monotonic_raw = {
68915 .clock_getres = hrtimer_get_res,
68916 .clock_get = posix_get_monotonic_raw,
68917 };
68918- struct k_clock clock_realtime_coarse = {
68919+ static struct k_clock clock_realtime_coarse = {
68920 .clock_getres = posix_get_coarse_res,
68921 .clock_get = posix_get_realtime_coarse,
68922 };
68923- struct k_clock clock_monotonic_coarse = {
68924+ static struct k_clock clock_monotonic_coarse = {
68925 .clock_getres = posix_get_coarse_res,
68926 .clock_get = posix_get_monotonic_coarse,
68927 };
68928- struct k_clock clock_boottime = {
68929+ static struct k_clock clock_boottime = {
68930 .clock_getres = hrtimer_get_res,
68931 .clock_get = posix_get_boottime,
68932 .nsleep = common_nsleep,
68933@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
68934 return;
68935 }
68936
68937- posix_clocks[clock_id] = *new_clock;
68938+ posix_clocks[clock_id] = new_clock;
68939 }
68940 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
68941
68942@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
68943 return (id & CLOCKFD_MASK) == CLOCKFD ?
68944 &clock_posix_dynamic : &clock_posix_cpu;
68945
68946- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
68947+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
68948 return NULL;
68949- return &posix_clocks[id];
68950+ return posix_clocks[id];
68951 }
68952
68953 static int common_timer_create(struct k_itimer *new_timer)
68954@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
68955 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
68956 return -EFAULT;
68957
68958+ /* only the CLOCK_REALTIME clock can be set, all other clocks
68959+ have their clock_set fptr set to a nosettime dummy function
68960+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
68961+ call common_clock_set, which calls do_sys_settimeofday, which
68962+ we hook
68963+ */
68964+
68965 return kc->clock_set(which_clock, &new_tp);
68966 }
68967
68968diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
68969index d523593..68197a4 100644
68970--- a/kernel/power/poweroff.c
68971+++ b/kernel/power/poweroff.c
68972@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
68973 .enable_mask = SYSRQ_ENABLE_BOOT,
68974 };
68975
68976-static int pm_sysrq_init(void)
68977+static int __init pm_sysrq_init(void)
68978 {
68979 register_sysrq_key('o', &sysrq_poweroff_op);
68980 return 0;
68981diff --git a/kernel/power/process.c b/kernel/power/process.c
68982index 19db29f..33b52b6 100644
68983--- a/kernel/power/process.c
68984+++ b/kernel/power/process.c
68985@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
68986 u64 elapsed_csecs64;
68987 unsigned int elapsed_csecs;
68988 bool wakeup = false;
68989+ bool timedout = false;
68990
68991 do_gettimeofday(&start);
68992
68993@@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
68994
68995 while (true) {
68996 todo = 0;
68997+ if (time_after(jiffies, end_time))
68998+ timedout = true;
68999 read_lock(&tasklist_lock);
69000 do_each_thread(g, p) {
69001 if (p == current || !freeze_task(p))
69002@@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
69003 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
69004 * transition can't race with task state testing here.
69005 */
69006- if (!task_is_stopped_or_traced(p) &&
69007- !freezer_should_skip(p))
69008+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
69009 todo++;
69010+ if (timedout) {
69011+ printk(KERN_ERR "Task refusing to freeze:\n");
69012+ sched_show_task(p);
69013+ }
69014+ }
69015 } while_each_thread(g, p);
69016 read_unlock(&tasklist_lock);
69017
69018@@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
69019 todo += wq_busy;
69020 }
69021
69022- if (!todo || time_after(jiffies, end_time))
69023+ if (!todo || timedout)
69024 break;
69025
69026 if (pm_wakeup_pending()) {
69027diff --git a/kernel/printk.c b/kernel/printk.c
69028index 146827f..a501fec 100644
69029--- a/kernel/printk.c
69030+++ b/kernel/printk.c
69031@@ -782,6 +782,11 @@ static int check_syslog_permissions(int type, bool from_file)
69032 if (from_file && type != SYSLOG_ACTION_OPEN)
69033 return 0;
69034
69035+#ifdef CONFIG_GRKERNSEC_DMESG
69036+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
69037+ return -EPERM;
69038+#endif
69039+
69040 if (syslog_action_restricted(type)) {
69041 if (capable(CAP_SYSLOG))
69042 return 0;
69043diff --git a/kernel/profile.c b/kernel/profile.c
69044index 76b8e77..a2930e8 100644
69045--- a/kernel/profile.c
69046+++ b/kernel/profile.c
69047@@ -39,7 +39,7 @@ struct profile_hit {
69048 /* Oprofile timer tick hook */
69049 static int (*timer_hook)(struct pt_regs *) __read_mostly;
69050
69051-static atomic_t *prof_buffer;
69052+static atomic_unchecked_t *prof_buffer;
69053 static unsigned long prof_len, prof_shift;
69054
69055 int prof_on __read_mostly;
69056@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
69057 hits[i].pc = 0;
69058 continue;
69059 }
69060- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
69061+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
69062 hits[i].hits = hits[i].pc = 0;
69063 }
69064 }
69065@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
69066 * Add the current hit(s) and flush the write-queue out
69067 * to the global buffer:
69068 */
69069- atomic_add(nr_hits, &prof_buffer[pc]);
69070+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
69071 for (i = 0; i < NR_PROFILE_HIT; ++i) {
69072- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
69073+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
69074 hits[i].pc = hits[i].hits = 0;
69075 }
69076 out:
69077@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
69078 {
69079 unsigned long pc;
69080 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
69081- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
69082+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
69083 }
69084 #endif /* !CONFIG_SMP */
69085
69086@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
69087 return -EFAULT;
69088 buf++; p++; count--; read++;
69089 }
69090- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
69091+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
69092 if (copy_to_user(buf, (void *)pnt, count))
69093 return -EFAULT;
69094 read += count;
69095@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
69096 }
69097 #endif
69098 profile_discard_flip_buffers();
69099- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
69100+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
69101 return count;
69102 }
69103
69104diff --git a/kernel/ptrace.c b/kernel/ptrace.c
69105index a232bb5..2a65ef9 100644
69106--- a/kernel/ptrace.c
69107+++ b/kernel/ptrace.c
69108@@ -279,7 +279,7 @@ static int ptrace_attach(struct task_struct *task, long request,
69109
69110 if (seize)
69111 flags |= PT_SEIZED;
69112- if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
69113+ if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
69114 flags |= PT_PTRACE_CAP;
69115 task->ptrace = flags;
69116
69117@@ -486,7 +486,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
69118 break;
69119 return -EIO;
69120 }
69121- if (copy_to_user(dst, buf, retval))
69122+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
69123 return -EFAULT;
69124 copied += retval;
69125 src += retval;
69126@@ -671,7 +671,7 @@ int ptrace_request(struct task_struct *child, long request,
69127 bool seized = child->ptrace & PT_SEIZED;
69128 int ret = -EIO;
69129 siginfo_t siginfo, *si;
69130- void __user *datavp = (void __user *) data;
69131+ void __user *datavp = (__force void __user *) data;
69132 unsigned long __user *datalp = datavp;
69133 unsigned long flags;
69134
69135@@ -873,14 +873,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
69136 goto out;
69137 }
69138
69139+ if (gr_handle_ptrace(child, request)) {
69140+ ret = -EPERM;
69141+ goto out_put_task_struct;
69142+ }
69143+
69144 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
69145 ret = ptrace_attach(child, request, addr, data);
69146 /*
69147 * Some architectures need to do book-keeping after
69148 * a ptrace attach.
69149 */
69150- if (!ret)
69151+ if (!ret) {
69152 arch_ptrace_attach(child);
69153+ gr_audit_ptrace(child);
69154+ }
69155 goto out_put_task_struct;
69156 }
69157
69158@@ -906,7 +913,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
69159 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
69160 if (copied != sizeof(tmp))
69161 return -EIO;
69162- return put_user(tmp, (unsigned long __user *)data);
69163+ return put_user(tmp, (__force unsigned long __user *)data);
69164 }
69165
69166 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
69167@@ -1016,14 +1023,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
69168 goto out;
69169 }
69170
69171+ if (gr_handle_ptrace(child, request)) {
69172+ ret = -EPERM;
69173+ goto out_put_task_struct;
69174+ }
69175+
69176 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
69177 ret = ptrace_attach(child, request, addr, data);
69178 /*
69179 * Some architectures need to do book-keeping after
69180 * a ptrace attach.
69181 */
69182- if (!ret)
69183+ if (!ret) {
69184 arch_ptrace_attach(child);
69185+ gr_audit_ptrace(child);
69186+ }
69187 goto out_put_task_struct;
69188 }
69189
69190diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
69191index 37a5444..eec170a 100644
69192--- a/kernel/rcutiny.c
69193+++ b/kernel/rcutiny.c
69194@@ -46,7 +46,7 @@
69195 struct rcu_ctrlblk;
69196 static void invoke_rcu_callbacks(void);
69197 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
69198-static void rcu_process_callbacks(struct softirq_action *unused);
69199+static void rcu_process_callbacks(void);
69200 static void __call_rcu(struct rcu_head *head,
69201 void (*func)(struct rcu_head *rcu),
69202 struct rcu_ctrlblk *rcp);
69203@@ -307,7 +307,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
69204 rcu_is_callbacks_kthread()));
69205 }
69206
69207-static void rcu_process_callbacks(struct softirq_action *unused)
69208+static void rcu_process_callbacks(void)
69209 {
69210 __rcu_process_callbacks(&rcu_sched_ctrlblk);
69211 __rcu_process_callbacks(&rcu_bh_ctrlblk);
69212diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
69213index fc31a2d..be2ec04 100644
69214--- a/kernel/rcutiny_plugin.h
69215+++ b/kernel/rcutiny_plugin.h
69216@@ -939,7 +939,7 @@ static int rcu_kthread(void *arg)
69217 have_rcu_kthread_work = morework;
69218 local_irq_restore(flags);
69219 if (work)
69220- rcu_process_callbacks(NULL);
69221+ rcu_process_callbacks();
69222 schedule_timeout_interruptible(1); /* Leave CPU for others. */
69223 }
69224
69225diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
69226index e66b34a..4b8b626 100644
69227--- a/kernel/rcutorture.c
69228+++ b/kernel/rcutorture.c
69229@@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
69230 { 0 };
69231 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
69232 { 0 };
69233-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
69234-static atomic_t n_rcu_torture_alloc;
69235-static atomic_t n_rcu_torture_alloc_fail;
69236-static atomic_t n_rcu_torture_free;
69237-static atomic_t n_rcu_torture_mberror;
69238-static atomic_t n_rcu_torture_error;
69239+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
69240+static atomic_unchecked_t n_rcu_torture_alloc;
69241+static atomic_unchecked_t n_rcu_torture_alloc_fail;
69242+static atomic_unchecked_t n_rcu_torture_free;
69243+static atomic_unchecked_t n_rcu_torture_mberror;
69244+static atomic_unchecked_t n_rcu_torture_error;
69245 static long n_rcu_torture_barrier_error;
69246 static long n_rcu_torture_boost_ktrerror;
69247 static long n_rcu_torture_boost_rterror;
69248@@ -265,11 +265,11 @@ rcu_torture_alloc(void)
69249
69250 spin_lock_bh(&rcu_torture_lock);
69251 if (list_empty(&rcu_torture_freelist)) {
69252- atomic_inc(&n_rcu_torture_alloc_fail);
69253+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
69254 spin_unlock_bh(&rcu_torture_lock);
69255 return NULL;
69256 }
69257- atomic_inc(&n_rcu_torture_alloc);
69258+ atomic_inc_unchecked(&n_rcu_torture_alloc);
69259 p = rcu_torture_freelist.next;
69260 list_del_init(p);
69261 spin_unlock_bh(&rcu_torture_lock);
69262@@ -282,7 +282,7 @@ rcu_torture_alloc(void)
69263 static void
69264 rcu_torture_free(struct rcu_torture *p)
69265 {
69266- atomic_inc(&n_rcu_torture_free);
69267+ atomic_inc_unchecked(&n_rcu_torture_free);
69268 spin_lock_bh(&rcu_torture_lock);
69269 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
69270 spin_unlock_bh(&rcu_torture_lock);
69271@@ -403,7 +403,7 @@ rcu_torture_cb(struct rcu_head *p)
69272 i = rp->rtort_pipe_count;
69273 if (i > RCU_TORTURE_PIPE_LEN)
69274 i = RCU_TORTURE_PIPE_LEN;
69275- atomic_inc(&rcu_torture_wcount[i]);
69276+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
69277 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
69278 rp->rtort_mbtest = 0;
69279 rcu_torture_free(rp);
69280@@ -451,7 +451,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
69281 i = rp->rtort_pipe_count;
69282 if (i > RCU_TORTURE_PIPE_LEN)
69283 i = RCU_TORTURE_PIPE_LEN;
69284- atomic_inc(&rcu_torture_wcount[i]);
69285+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
69286 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
69287 rp->rtort_mbtest = 0;
69288 list_del(&rp->rtort_free);
69289@@ -983,7 +983,7 @@ rcu_torture_writer(void *arg)
69290 i = old_rp->rtort_pipe_count;
69291 if (i > RCU_TORTURE_PIPE_LEN)
69292 i = RCU_TORTURE_PIPE_LEN;
69293- atomic_inc(&rcu_torture_wcount[i]);
69294+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
69295 old_rp->rtort_pipe_count++;
69296 cur_ops->deferred_free(old_rp);
69297 }
69298@@ -1064,7 +1064,7 @@ static void rcu_torture_timer(unsigned long unused)
69299 }
69300 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
69301 if (p->rtort_mbtest == 0)
69302- atomic_inc(&n_rcu_torture_mberror);
69303+ atomic_inc_unchecked(&n_rcu_torture_mberror);
69304 spin_lock(&rand_lock);
69305 cur_ops->read_delay(&rand);
69306 n_rcu_torture_timers++;
69307@@ -1128,7 +1128,7 @@ rcu_torture_reader(void *arg)
69308 }
69309 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
69310 if (p->rtort_mbtest == 0)
69311- atomic_inc(&n_rcu_torture_mberror);
69312+ atomic_inc_unchecked(&n_rcu_torture_mberror);
69313 cur_ops->read_delay(&rand);
69314 preempt_disable();
69315 pipe_count = p->rtort_pipe_count;
69316@@ -1191,10 +1191,10 @@ rcu_torture_printk(char *page)
69317 rcu_torture_current,
69318 rcu_torture_current_version,
69319 list_empty(&rcu_torture_freelist),
69320- atomic_read(&n_rcu_torture_alloc),
69321- atomic_read(&n_rcu_torture_alloc_fail),
69322- atomic_read(&n_rcu_torture_free),
69323- atomic_read(&n_rcu_torture_mberror),
69324+ atomic_read_unchecked(&n_rcu_torture_alloc),
69325+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
69326+ atomic_read_unchecked(&n_rcu_torture_free),
69327+ atomic_read_unchecked(&n_rcu_torture_mberror),
69328 n_rcu_torture_boost_ktrerror,
69329 n_rcu_torture_boost_rterror,
69330 n_rcu_torture_boost_failure,
69331@@ -1208,14 +1208,14 @@ rcu_torture_printk(char *page)
69332 n_barrier_attempts,
69333 n_rcu_torture_barrier_error);
69334 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
69335- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
69336+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
69337 n_rcu_torture_barrier_error != 0 ||
69338 n_rcu_torture_boost_ktrerror != 0 ||
69339 n_rcu_torture_boost_rterror != 0 ||
69340 n_rcu_torture_boost_failure != 0 ||
69341 i > 1) {
69342 cnt += sprintf(&page[cnt], "!!! ");
69343- atomic_inc(&n_rcu_torture_error);
69344+ atomic_inc_unchecked(&n_rcu_torture_error);
69345 WARN_ON_ONCE(1);
69346 }
69347 cnt += sprintf(&page[cnt], "Reader Pipe: ");
69348@@ -1229,7 +1229,7 @@ rcu_torture_printk(char *page)
69349 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
69350 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
69351 cnt += sprintf(&page[cnt], " %d",
69352- atomic_read(&rcu_torture_wcount[i]));
69353+ atomic_read_unchecked(&rcu_torture_wcount[i]));
69354 }
69355 cnt += sprintf(&page[cnt], "\n");
69356 if (cur_ops->stats)
69357@@ -1888,7 +1888,7 @@ rcu_torture_cleanup(void)
69358
69359 if (cur_ops->cleanup)
69360 cur_ops->cleanup();
69361- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
69362+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
69363 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
69364 else if (n_online_successes != n_online_attempts ||
69365 n_offline_successes != n_offline_attempts)
69366@@ -1958,18 +1958,18 @@ rcu_torture_init(void)
69367
69368 rcu_torture_current = NULL;
69369 rcu_torture_current_version = 0;
69370- atomic_set(&n_rcu_torture_alloc, 0);
69371- atomic_set(&n_rcu_torture_alloc_fail, 0);
69372- atomic_set(&n_rcu_torture_free, 0);
69373- atomic_set(&n_rcu_torture_mberror, 0);
69374- atomic_set(&n_rcu_torture_error, 0);
69375+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
69376+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
69377+ atomic_set_unchecked(&n_rcu_torture_free, 0);
69378+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
69379+ atomic_set_unchecked(&n_rcu_torture_error, 0);
69380 n_rcu_torture_barrier_error = 0;
69381 n_rcu_torture_boost_ktrerror = 0;
69382 n_rcu_torture_boost_rterror = 0;
69383 n_rcu_torture_boost_failure = 0;
69384 n_rcu_torture_boosts = 0;
69385 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
69386- atomic_set(&rcu_torture_wcount[i], 0);
69387+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
69388 for_each_possible_cpu(cpu) {
69389 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
69390 per_cpu(rcu_torture_count, cpu)[i] = 0;
69391diff --git a/kernel/rcutree.c b/kernel/rcutree.c
69392index 4b97bba..b92c9d2 100644
69393--- a/kernel/rcutree.c
69394+++ b/kernel/rcutree.c
69395@@ -366,9 +366,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
69396 rcu_prepare_for_idle(smp_processor_id());
69397 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
69398 smp_mb__before_atomic_inc(); /* See above. */
69399- atomic_inc(&rdtp->dynticks);
69400+ atomic_inc_unchecked(&rdtp->dynticks);
69401 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
69402- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
69403+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
69404
69405 /*
69406 * The idle task is not permitted to enter the idle loop while
69407@@ -457,10 +457,10 @@ void rcu_irq_exit(void)
69408 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
69409 {
69410 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
69411- atomic_inc(&rdtp->dynticks);
69412+ atomic_inc_unchecked(&rdtp->dynticks);
69413 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
69414 smp_mb__after_atomic_inc(); /* See above. */
69415- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
69416+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
69417 rcu_cleanup_after_idle(smp_processor_id());
69418 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
69419 if (!is_idle_task(current)) {
69420@@ -554,14 +554,14 @@ void rcu_nmi_enter(void)
69421 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
69422
69423 if (rdtp->dynticks_nmi_nesting == 0 &&
69424- (atomic_read(&rdtp->dynticks) & 0x1))
69425+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
69426 return;
69427 rdtp->dynticks_nmi_nesting++;
69428 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
69429- atomic_inc(&rdtp->dynticks);
69430+ atomic_inc_unchecked(&rdtp->dynticks);
69431 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
69432 smp_mb__after_atomic_inc(); /* See above. */
69433- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
69434+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
69435 }
69436
69437 /**
69438@@ -580,9 +580,9 @@ void rcu_nmi_exit(void)
69439 return;
69440 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
69441 smp_mb__before_atomic_inc(); /* See above. */
69442- atomic_inc(&rdtp->dynticks);
69443+ atomic_inc_unchecked(&rdtp->dynticks);
69444 smp_mb__after_atomic_inc(); /* Force delay to next write. */
69445- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
69446+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
69447 }
69448
69449 #ifdef CONFIG_PROVE_RCU
69450@@ -598,7 +598,7 @@ int rcu_is_cpu_idle(void)
69451 int ret;
69452
69453 preempt_disable();
69454- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
69455+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
69456 preempt_enable();
69457 return ret;
69458 }
69459@@ -668,7 +668,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
69460 */
69461 static int dyntick_save_progress_counter(struct rcu_data *rdp)
69462 {
69463- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
69464+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
69465 return (rdp->dynticks_snap & 0x1) == 0;
69466 }
69467
69468@@ -683,7 +683,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
69469 unsigned int curr;
69470 unsigned int snap;
69471
69472- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
69473+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
69474 snap = (unsigned int)rdp->dynticks_snap;
69475
69476 /*
69477@@ -713,10 +713,10 @@ static int jiffies_till_stall_check(void)
69478 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
69479 */
69480 if (till_stall_check < 3) {
69481- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
69482+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
69483 till_stall_check = 3;
69484 } else if (till_stall_check > 300) {
69485- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
69486+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
69487 till_stall_check = 300;
69488 }
69489 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
69490@@ -1824,7 +1824,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
69491 /*
69492 * Do RCU core processing for the current CPU.
69493 */
69494-static void rcu_process_callbacks(struct softirq_action *unused)
69495+static void rcu_process_callbacks(void)
69496 {
69497 trace_rcu_utilization("Start RCU core");
69498 __rcu_process_callbacks(&rcu_sched_state,
69499@@ -2042,8 +2042,8 @@ void synchronize_rcu_bh(void)
69500 }
69501 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
69502
69503-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
69504-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
69505+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
69506+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
69507
69508 static int synchronize_sched_expedited_cpu_stop(void *data)
69509 {
69510@@ -2104,7 +2104,7 @@ void synchronize_sched_expedited(void)
69511 int firstsnap, s, snap, trycount = 0;
69512
69513 /* Note that atomic_inc_return() implies full memory barrier. */
69514- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
69515+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
69516 get_online_cpus();
69517 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
69518
69519@@ -2126,7 +2126,7 @@ void synchronize_sched_expedited(void)
69520 }
69521
69522 /* Check to see if someone else did our work for us. */
69523- s = atomic_read(&sync_sched_expedited_done);
69524+ s = atomic_read_unchecked(&sync_sched_expedited_done);
69525 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
69526 smp_mb(); /* ensure test happens before caller kfree */
69527 return;
69528@@ -2141,7 +2141,7 @@ void synchronize_sched_expedited(void)
69529 * grace period works for us.
69530 */
69531 get_online_cpus();
69532- snap = atomic_read(&sync_sched_expedited_started);
69533+ snap = atomic_read_unchecked(&sync_sched_expedited_started);
69534 smp_mb(); /* ensure read is before try_stop_cpus(). */
69535 }
69536
69537@@ -2152,12 +2152,12 @@ void synchronize_sched_expedited(void)
69538 * than we did beat us to the punch.
69539 */
69540 do {
69541- s = atomic_read(&sync_sched_expedited_done);
69542+ s = atomic_read_unchecked(&sync_sched_expedited_done);
69543 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
69544 smp_mb(); /* ensure test happens before caller kfree */
69545 break;
69546 }
69547- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
69548+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
69549
69550 put_online_cpus();
69551 }
69552@@ -2421,7 +2421,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
69553 rdp->qlen = 0;
69554 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
69555 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
69556- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
69557+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
69558 rdp->cpu = cpu;
69559 rdp->rsp = rsp;
69560 raw_spin_unlock_irqrestore(&rnp->lock, flags);
69561@@ -2449,8 +2449,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
69562 rdp->n_force_qs_snap = rsp->n_force_qs;
69563 rdp->blimit = blimit;
69564 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
69565- atomic_set(&rdp->dynticks->dynticks,
69566- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
69567+ atomic_set_unchecked(&rdp->dynticks->dynticks,
69568+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
69569 rcu_prepare_for_idle_init(cpu);
69570 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
69571
69572diff --git a/kernel/rcutree.h b/kernel/rcutree.h
69573index 19b61ac..5c60a94 100644
69574--- a/kernel/rcutree.h
69575+++ b/kernel/rcutree.h
69576@@ -83,7 +83,7 @@ struct rcu_dynticks {
69577 long long dynticks_nesting; /* Track irq/process nesting level. */
69578 /* Process level is worth LLONG_MAX/2. */
69579 int dynticks_nmi_nesting; /* Track NMI nesting level. */
69580- atomic_t dynticks; /* Even value for idle, else odd. */
69581+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
69582 #ifdef CONFIG_RCU_FAST_NO_HZ
69583 int dyntick_drain; /* Prepare-for-idle state variable. */
69584 unsigned long dyntick_holdoff;
69585diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
69586index 3e48994..d94f03a 100644
69587--- a/kernel/rcutree_plugin.h
69588+++ b/kernel/rcutree_plugin.h
69589@@ -909,7 +909,7 @@ void synchronize_rcu_expedited(void)
69590
69591 /* Clean up and exit. */
69592 smp_mb(); /* ensure expedited GP seen before counter increment. */
69593- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
69594+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
69595 unlock_mb_ret:
69596 mutex_unlock(&sync_rcu_preempt_exp_mutex);
69597 mb_ret:
69598diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
69599index d4bc16d..c234a5c 100644
69600--- a/kernel/rcutree_trace.c
69601+++ b/kernel/rcutree_trace.c
69602@@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
69603 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
69604 rdp->qs_pending);
69605 seq_printf(m, " dt=%d/%llx/%d df=%lu",
69606- atomic_read(&rdp->dynticks->dynticks),
69607+ atomic_read_unchecked(&rdp->dynticks->dynticks),
69608 rdp->dynticks->dynticks_nesting,
69609 rdp->dynticks->dynticks_nmi_nesting,
69610 rdp->dynticks_fqs);
69611@@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
69612 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
69613 rdp->qs_pending);
69614 seq_printf(m, ",%d,%llx,%d,%lu",
69615- atomic_read(&rdp->dynticks->dynticks),
69616+ atomic_read_unchecked(&rdp->dynticks->dynticks),
69617 rdp->dynticks->dynticks_nesting,
69618 rdp->dynticks->dynticks_nmi_nesting,
69619 rdp->dynticks_fqs);
69620diff --git a/kernel/resource.c b/kernel/resource.c
69621index e1d2b8e..24820bb 100644
69622--- a/kernel/resource.c
69623+++ b/kernel/resource.c
69624@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
69625
69626 static int __init ioresources_init(void)
69627 {
69628+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69629+#ifdef CONFIG_GRKERNSEC_PROC_USER
69630+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
69631+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
69632+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69633+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
69634+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
69635+#endif
69636+#else
69637 proc_create("ioports", 0, NULL, &proc_ioports_operations);
69638 proc_create("iomem", 0, NULL, &proc_iomem_operations);
69639+#endif
69640 return 0;
69641 }
69642 __initcall(ioresources_init);
69643diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
69644index 98ec494..4241d6d 100644
69645--- a/kernel/rtmutex-tester.c
69646+++ b/kernel/rtmutex-tester.c
69647@@ -20,7 +20,7 @@
69648 #define MAX_RT_TEST_MUTEXES 8
69649
69650 static spinlock_t rttest_lock;
69651-static atomic_t rttest_event;
69652+static atomic_unchecked_t rttest_event;
69653
69654 struct test_thread_data {
69655 int opcode;
69656@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69657
69658 case RTTEST_LOCKCONT:
69659 td->mutexes[td->opdata] = 1;
69660- td->event = atomic_add_return(1, &rttest_event);
69661+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69662 return 0;
69663
69664 case RTTEST_RESET:
69665@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69666 return 0;
69667
69668 case RTTEST_RESETEVENT:
69669- atomic_set(&rttest_event, 0);
69670+ atomic_set_unchecked(&rttest_event, 0);
69671 return 0;
69672
69673 default:
69674@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69675 return ret;
69676
69677 td->mutexes[id] = 1;
69678- td->event = atomic_add_return(1, &rttest_event);
69679+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69680 rt_mutex_lock(&mutexes[id]);
69681- td->event = atomic_add_return(1, &rttest_event);
69682+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69683 td->mutexes[id] = 4;
69684 return 0;
69685
69686@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69687 return ret;
69688
69689 td->mutexes[id] = 1;
69690- td->event = atomic_add_return(1, &rttest_event);
69691+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69692 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
69693- td->event = atomic_add_return(1, &rttest_event);
69694+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69695 td->mutexes[id] = ret ? 0 : 4;
69696 return ret ? -EINTR : 0;
69697
69698@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69699 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
69700 return ret;
69701
69702- td->event = atomic_add_return(1, &rttest_event);
69703+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69704 rt_mutex_unlock(&mutexes[id]);
69705- td->event = atomic_add_return(1, &rttest_event);
69706+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69707 td->mutexes[id] = 0;
69708 return 0;
69709
69710@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69711 break;
69712
69713 td->mutexes[dat] = 2;
69714- td->event = atomic_add_return(1, &rttest_event);
69715+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69716 break;
69717
69718 default:
69719@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69720 return;
69721
69722 td->mutexes[dat] = 3;
69723- td->event = atomic_add_return(1, &rttest_event);
69724+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69725 break;
69726
69727 case RTTEST_LOCKNOWAIT:
69728@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69729 return;
69730
69731 td->mutexes[dat] = 1;
69732- td->event = atomic_add_return(1, &rttest_event);
69733+ td->event = atomic_add_return_unchecked(1, &rttest_event);
69734 return;
69735
69736 default:
69737diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
69738index 0984a21..939f183 100644
69739--- a/kernel/sched/auto_group.c
69740+++ b/kernel/sched/auto_group.c
69741@@ -11,7 +11,7 @@
69742
69743 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
69744 static struct autogroup autogroup_default;
69745-static atomic_t autogroup_seq_nr;
69746+static atomic_unchecked_t autogroup_seq_nr;
69747
69748 void __init autogroup_init(struct task_struct *init_task)
69749 {
69750@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
69751
69752 kref_init(&ag->kref);
69753 init_rwsem(&ag->lock);
69754- ag->id = atomic_inc_return(&autogroup_seq_nr);
69755+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
69756 ag->tg = tg;
69757 #ifdef CONFIG_RT_GROUP_SCHED
69758 /*
69759diff --git a/kernel/sched/core.c b/kernel/sched/core.c
69760index 39c44fa..70edb8b 100644
69761--- a/kernel/sched/core.c
69762+++ b/kernel/sched/core.c
69763@@ -4103,6 +4103,8 @@ int can_nice(const struct task_struct *p, const int nice)
69764 /* convert nice value [19,-20] to rlimit style value [1,40] */
69765 int nice_rlim = 20 - nice;
69766
69767+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
69768+
69769 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
69770 capable(CAP_SYS_NICE));
69771 }
69772@@ -4136,7 +4138,8 @@ SYSCALL_DEFINE1(nice, int, increment)
69773 if (nice > 19)
69774 nice = 19;
69775
69776- if (increment < 0 && !can_nice(current, nice))
69777+ if (increment < 0 && (!can_nice(current, nice) ||
69778+ gr_handle_chroot_nice()))
69779 return -EPERM;
69780
69781 retval = security_task_setnice(current, nice);
69782@@ -4290,6 +4293,7 @@ recheck:
69783 unsigned long rlim_rtprio =
69784 task_rlimit(p, RLIMIT_RTPRIO);
69785
69786+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
69787 /* can't set/change the rt policy */
69788 if (policy != p->policy && !rlim_rtprio)
69789 return -EPERM;
69790diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
69791index c099cc6..06aec4f 100644
69792--- a/kernel/sched/fair.c
69793+++ b/kernel/sched/fair.c
69794@@ -4846,7 +4846,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
69795 * run_rebalance_domains is triggered when needed from the scheduler tick.
69796 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
69797 */
69798-static void run_rebalance_domains(struct softirq_action *h)
69799+static void run_rebalance_domains(void)
69800 {
69801 int this_cpu = smp_processor_id();
69802 struct rq *this_rq = cpu_rq(this_cpu);
69803diff --git a/kernel/signal.c b/kernel/signal.c
69804index 6771027..763e51e 100644
69805--- a/kernel/signal.c
69806+++ b/kernel/signal.c
69807@@ -48,12 +48,12 @@ static struct kmem_cache *sigqueue_cachep;
69808
69809 int print_fatal_signals __read_mostly;
69810
69811-static void __user *sig_handler(struct task_struct *t, int sig)
69812+static __sighandler_t sig_handler(struct task_struct *t, int sig)
69813 {
69814 return t->sighand->action[sig - 1].sa.sa_handler;
69815 }
69816
69817-static int sig_handler_ignored(void __user *handler, int sig)
69818+static int sig_handler_ignored(__sighandler_t handler, int sig)
69819 {
69820 /* Is it explicitly or implicitly ignored? */
69821 return handler == SIG_IGN ||
69822@@ -62,7 +62,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
69823
69824 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
69825 {
69826- void __user *handler;
69827+ __sighandler_t handler;
69828
69829 handler = sig_handler(t, sig);
69830
69831@@ -366,6 +366,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
69832 atomic_inc(&user->sigpending);
69833 rcu_read_unlock();
69834
69835+ if (!override_rlimit)
69836+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
69837+
69838 if (override_rlimit ||
69839 atomic_read(&user->sigpending) <=
69840 task_rlimit(t, RLIMIT_SIGPENDING)) {
69841@@ -490,7 +493,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
69842
69843 int unhandled_signal(struct task_struct *tsk, int sig)
69844 {
69845- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
69846+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
69847 if (is_global_init(tsk))
69848 return 1;
69849 if (handler != SIG_IGN && handler != SIG_DFL)
69850@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
69851 }
69852 }
69853
69854+ /* allow glibc communication via tgkill to other threads in our
69855+ thread group */
69856+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
69857+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
69858+ && gr_handle_signal(t, sig))
69859+ return -EPERM;
69860+
69861 return security_task_kill(t, info, sig, 0);
69862 }
69863
69864@@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
69865 return send_signal(sig, info, p, 1);
69866 }
69867
69868-static int
69869+int
69870 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
69871 {
69872 return send_signal(sig, info, t, 0);
69873@@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
69874 unsigned long int flags;
69875 int ret, blocked, ignored;
69876 struct k_sigaction *action;
69877+ int is_unhandled = 0;
69878
69879 spin_lock_irqsave(&t->sighand->siglock, flags);
69880 action = &t->sighand->action[sig-1];
69881@@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
69882 }
69883 if (action->sa.sa_handler == SIG_DFL)
69884 t->signal->flags &= ~SIGNAL_UNKILLABLE;
69885+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
69886+ is_unhandled = 1;
69887 ret = specific_send_sig_info(sig, info, t);
69888 spin_unlock_irqrestore(&t->sighand->siglock, flags);
69889
69890+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
69891+ normal operation */
69892+ if (is_unhandled) {
69893+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
69894+ gr_handle_crash(t, sig);
69895+ }
69896+
69897 return ret;
69898 }
69899
69900@@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
69901 ret = check_kill_permission(sig, info, p);
69902 rcu_read_unlock();
69903
69904- if (!ret && sig)
69905+ if (!ret && sig) {
69906 ret = do_send_sig_info(sig, info, p, true);
69907+ if (!ret)
69908+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
69909+ }
69910
69911 return ret;
69912 }
69913@@ -2858,7 +2881,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
69914 int error = -ESRCH;
69915
69916 rcu_read_lock();
69917- p = find_task_by_vpid(pid);
69918+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69919+ /* allow glibc communication via tgkill to other threads in our
69920+ thread group */
69921+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
69922+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
69923+ p = find_task_by_vpid_unrestricted(pid);
69924+ else
69925+#endif
69926+ p = find_task_by_vpid(pid);
69927 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
69928 error = check_kill_permission(sig, info, p);
69929 /*
69930diff --git a/kernel/smp.c b/kernel/smp.c
69931index d0ae5b2..b87c5a8 100644
69932--- a/kernel/smp.c
69933+++ b/kernel/smp.c
69934@@ -582,22 +582,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
69935 }
69936 EXPORT_SYMBOL(smp_call_function);
69937
69938-void ipi_call_lock(void)
69939+void ipi_call_lock(void) __acquires(call_function.lock)
69940 {
69941 raw_spin_lock(&call_function.lock);
69942 }
69943
69944-void ipi_call_unlock(void)
69945+void ipi_call_unlock(void) __releases(call_function.lock)
69946 {
69947 raw_spin_unlock(&call_function.lock);
69948 }
69949
69950-void ipi_call_lock_irq(void)
69951+void ipi_call_lock_irq(void) __acquires(call_function.lock)
69952 {
69953 raw_spin_lock_irq(&call_function.lock);
69954 }
69955
69956-void ipi_call_unlock_irq(void)
69957+void ipi_call_unlock_irq(void) __releases(call_function.lock)
69958 {
69959 raw_spin_unlock_irq(&call_function.lock);
69960 }
69961diff --git a/kernel/softirq.c b/kernel/softirq.c
69962index 671f959..91c51cb 100644
69963--- a/kernel/softirq.c
69964+++ b/kernel/softirq.c
69965@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
69966
69967 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
69968
69969-char *softirq_to_name[NR_SOFTIRQS] = {
69970+const char * const softirq_to_name[NR_SOFTIRQS] = {
69971 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
69972 "TASKLET", "SCHED", "HRTIMER", "RCU"
69973 };
69974@@ -235,7 +235,7 @@ restart:
69975 kstat_incr_softirqs_this_cpu(vec_nr);
69976
69977 trace_softirq_entry(vec_nr);
69978- h->action(h);
69979+ h->action();
69980 trace_softirq_exit(vec_nr);
69981 if (unlikely(prev_count != preempt_count())) {
69982 printk(KERN_ERR "huh, entered softirq %u %s %p"
69983@@ -381,9 +381,11 @@ void __raise_softirq_irqoff(unsigned int nr)
69984 or_softirq_pending(1UL << nr);
69985 }
69986
69987-void open_softirq(int nr, void (*action)(struct softirq_action *))
69988+void open_softirq(int nr, void (*action)(void))
69989 {
69990- softirq_vec[nr].action = action;
69991+ pax_open_kernel();
69992+ *(void **)&softirq_vec[nr].action = action;
69993+ pax_close_kernel();
69994 }
69995
69996 /*
69997@@ -437,7 +439,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
69998
69999 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
70000
70001-static void tasklet_action(struct softirq_action *a)
70002+static void tasklet_action(void)
70003 {
70004 struct tasklet_struct *list;
70005
70006@@ -472,7 +474,7 @@ static void tasklet_action(struct softirq_action *a)
70007 }
70008 }
70009
70010-static void tasklet_hi_action(struct softirq_action *a)
70011+static void tasklet_hi_action(void)
70012 {
70013 struct tasklet_struct *list;
70014
70015diff --git a/kernel/srcu.c b/kernel/srcu.c
70016index 2095be3..9a5b89d 100644
70017--- a/kernel/srcu.c
70018+++ b/kernel/srcu.c
70019@@ -302,9 +302,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
70020 preempt_disable();
70021 idx = rcu_dereference_index_check(sp->completed,
70022 rcu_read_lock_sched_held()) & 0x1;
70023- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
70024+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
70025 smp_mb(); /* B */ /* Avoid leaking the critical section. */
70026- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
70027+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
70028 preempt_enable();
70029 return idx;
70030 }
70031@@ -320,7 +320,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
70032 {
70033 preempt_disable();
70034 smp_mb(); /* C */ /* Avoid leaking the critical section. */
70035- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
70036+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
70037 preempt_enable();
70038 }
70039 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
70040diff --git a/kernel/sys.c b/kernel/sys.c
70041index 2d39a84..f778b49 100644
70042--- a/kernel/sys.c
70043+++ b/kernel/sys.c
70044@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
70045 error = -EACCES;
70046 goto out;
70047 }
70048+
70049+ if (gr_handle_chroot_setpriority(p, niceval)) {
70050+ error = -EACCES;
70051+ goto out;
70052+ }
70053+
70054 no_nice = security_task_setnice(p, niceval);
70055 if (no_nice) {
70056 error = no_nice;
70057@@ -594,6 +600,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
70058 goto error;
70059 }
70060
70061+ if (gr_check_group_change(new->gid, new->egid, -1))
70062+ goto error;
70063+
70064 if (rgid != (gid_t) -1 ||
70065 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
70066 new->sgid = new->egid;
70067@@ -629,6 +638,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
70068 old = current_cred();
70069
70070 retval = -EPERM;
70071+
70072+ if (gr_check_group_change(kgid, kgid, kgid))
70073+ goto error;
70074+
70075 if (nsown_capable(CAP_SETGID))
70076 new->gid = new->egid = new->sgid = new->fsgid = kgid;
70077 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
70078@@ -646,7 +659,7 @@ error:
70079 /*
70080 * change the user struct in a credentials set to match the new UID
70081 */
70082-static int set_user(struct cred *new)
70083+int set_user(struct cred *new)
70084 {
70085 struct user_struct *new_user;
70086
70087@@ -726,6 +739,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
70088 goto error;
70089 }
70090
70091+ if (gr_check_user_change(new->uid, new->euid, -1))
70092+ goto error;
70093+
70094 if (!uid_eq(new->uid, old->uid)) {
70095 retval = set_user(new);
70096 if (retval < 0)
70097@@ -776,6 +792,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
70098 old = current_cred();
70099
70100 retval = -EPERM;
70101+
70102+ if (gr_check_crash_uid(kuid))
70103+ goto error;
70104+ if (gr_check_user_change(kuid, kuid, kuid))
70105+ goto error;
70106+
70107 if (nsown_capable(CAP_SETUID)) {
70108 new->suid = new->uid = kuid;
70109 if (!uid_eq(kuid, old->uid)) {
70110@@ -845,6 +867,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
70111 goto error;
70112 }
70113
70114+ if (gr_check_user_change(kruid, keuid, -1))
70115+ goto error;
70116+
70117 if (ruid != (uid_t) -1) {
70118 new->uid = kruid;
70119 if (!uid_eq(kruid, old->uid)) {
70120@@ -927,6 +952,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
70121 goto error;
70122 }
70123
70124+ if (gr_check_group_change(krgid, kegid, -1))
70125+ goto error;
70126+
70127 if (rgid != (gid_t) -1)
70128 new->gid = krgid;
70129 if (egid != (gid_t) -1)
70130@@ -980,6 +1008,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
70131 if (!uid_valid(kuid))
70132 return old_fsuid;
70133
70134+ if (gr_check_user_change(-1, -1, kuid))
70135+ goto error;
70136+
70137 new = prepare_creds();
70138 if (!new)
70139 return old_fsuid;
70140@@ -994,6 +1025,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
70141 }
70142 }
70143
70144+error:
70145 abort_creds(new);
70146 return old_fsuid;
70147
70148@@ -1026,12 +1058,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
70149 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
70150 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
70151 nsown_capable(CAP_SETGID)) {
70152+ if (gr_check_group_change(-1, -1, kgid))
70153+ goto error;
70154+
70155 if (!gid_eq(kgid, old->fsgid)) {
70156 new->fsgid = kgid;
70157 goto change_okay;
70158 }
70159 }
70160
70161+error:
70162 abort_creds(new);
70163 return old_fsgid;
70164
70165@@ -1283,7 +1319,10 @@ static int override_release(char __user *release, int len)
70166 }
70167 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
70168 snprintf(buf, len, "2.6.%u%s", v, rest);
70169- ret = copy_to_user(release, buf, len);
70170+ if (len > sizeof(buf))
70171+ ret = -EFAULT;
70172+ else
70173+ ret = copy_to_user(release, buf, len);
70174 }
70175 return ret;
70176 }
70177@@ -1337,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
70178 return -EFAULT;
70179
70180 down_read(&uts_sem);
70181- error = __copy_to_user(&name->sysname, &utsname()->sysname,
70182+ error = __copy_to_user(name->sysname, &utsname()->sysname,
70183 __OLD_UTS_LEN);
70184 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
70185- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
70186+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
70187 __OLD_UTS_LEN);
70188 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
70189- error |= __copy_to_user(&name->release, &utsname()->release,
70190+ error |= __copy_to_user(name->release, &utsname()->release,
70191 __OLD_UTS_LEN);
70192 error |= __put_user(0, name->release + __OLD_UTS_LEN);
70193- error |= __copy_to_user(&name->version, &utsname()->version,
70194+ error |= __copy_to_user(name->version, &utsname()->version,
70195 __OLD_UTS_LEN);
70196 error |= __put_user(0, name->version + __OLD_UTS_LEN);
70197- error |= __copy_to_user(&name->machine, &utsname()->machine,
70198+ error |= __copy_to_user(name->machine, &utsname()->machine,
70199 __OLD_UTS_LEN);
70200 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
70201 up_read(&uts_sem);
70202@@ -2024,7 +2063,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
70203 error = get_dumpable(me->mm);
70204 break;
70205 case PR_SET_DUMPABLE:
70206- if (arg2 < 0 || arg2 > 1) {
70207+ if (arg2 > 1) {
70208 error = -EINVAL;
70209 break;
70210 }
70211diff --git a/kernel/sysctl.c b/kernel/sysctl.c
70212index 4ab1187..33f4f2b 100644
70213--- a/kernel/sysctl.c
70214+++ b/kernel/sysctl.c
70215@@ -91,7 +91,6 @@
70216
70217
70218 #if defined(CONFIG_SYSCTL)
70219-
70220 /* External variables not in a header file. */
70221 extern int sysctl_overcommit_memory;
70222 extern int sysctl_overcommit_ratio;
70223@@ -169,10 +168,13 @@ static int proc_taint(struct ctl_table *table, int write,
70224 void __user *buffer, size_t *lenp, loff_t *ppos);
70225 #endif
70226
70227-#ifdef CONFIG_PRINTK
70228 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
70229 void __user *buffer, size_t *lenp, loff_t *ppos);
70230-#endif
70231+
70232+static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
70233+ void __user *buffer, size_t *lenp, loff_t *ppos);
70234+static int proc_dostring_coredump(struct ctl_table *table, int write,
70235+ void __user *buffer, size_t *lenp, loff_t *ppos);
70236
70237 #ifdef CONFIG_MAGIC_SYSRQ
70238 /* Note: sysrq code uses it's own private copy */
70239@@ -196,6 +198,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
70240
70241 #endif
70242
70243+extern struct ctl_table grsecurity_table[];
70244+
70245 static struct ctl_table kern_table[];
70246 static struct ctl_table vm_table[];
70247 static struct ctl_table fs_table[];
70248@@ -210,6 +214,20 @@ extern struct ctl_table epoll_table[];
70249 int sysctl_legacy_va_layout;
70250 #endif
70251
70252+#ifdef CONFIG_PAX_SOFTMODE
70253+static ctl_table pax_table[] = {
70254+ {
70255+ .procname = "softmode",
70256+ .data = &pax_softmode,
70257+ .maxlen = sizeof(unsigned int),
70258+ .mode = 0600,
70259+ .proc_handler = &proc_dointvec,
70260+ },
70261+
70262+ { }
70263+};
70264+#endif
70265+
70266 /* The default sysctl tables: */
70267
70268 static struct ctl_table sysctl_base_table[] = {
70269@@ -256,6 +274,22 @@ static int max_extfrag_threshold = 1000;
70270 #endif
70271
70272 static struct ctl_table kern_table[] = {
70273+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
70274+ {
70275+ .procname = "grsecurity",
70276+ .mode = 0500,
70277+ .child = grsecurity_table,
70278+ },
70279+#endif
70280+
70281+#ifdef CONFIG_PAX_SOFTMODE
70282+ {
70283+ .procname = "pax",
70284+ .mode = 0500,
70285+ .child = pax_table,
70286+ },
70287+#endif
70288+
70289 {
70290 .procname = "sched_child_runs_first",
70291 .data = &sysctl_sched_child_runs_first,
70292@@ -410,7 +444,7 @@ static struct ctl_table kern_table[] = {
70293 .data = core_pattern,
70294 .maxlen = CORENAME_MAX_SIZE,
70295 .mode = 0644,
70296- .proc_handler = proc_dostring,
70297+ .proc_handler = proc_dostring_coredump,
70298 },
70299 {
70300 .procname = "core_pipe_limit",
70301@@ -540,7 +574,7 @@ static struct ctl_table kern_table[] = {
70302 .data = &modprobe_path,
70303 .maxlen = KMOD_PATH_LEN,
70304 .mode = 0644,
70305- .proc_handler = proc_dostring,
70306+ .proc_handler = proc_dostring_modpriv,
70307 },
70308 {
70309 .procname = "modules_disabled",
70310@@ -707,16 +741,20 @@ static struct ctl_table kern_table[] = {
70311 .extra1 = &zero,
70312 .extra2 = &one,
70313 },
70314+#endif
70315 {
70316 .procname = "kptr_restrict",
70317 .data = &kptr_restrict,
70318 .maxlen = sizeof(int),
70319 .mode = 0644,
70320 .proc_handler = proc_dointvec_minmax_sysadmin,
70321+#ifdef CONFIG_GRKERNSEC_HIDESYM
70322+ .extra1 = &two,
70323+#else
70324 .extra1 = &zero,
70325+#endif
70326 .extra2 = &two,
70327 },
70328-#endif
70329 {
70330 .procname = "ngroups_max",
70331 .data = &ngroups_max,
70332@@ -1215,6 +1253,13 @@ static struct ctl_table vm_table[] = {
70333 .proc_handler = proc_dointvec_minmax,
70334 .extra1 = &zero,
70335 },
70336+ {
70337+ .procname = "heap_stack_gap",
70338+ .data = &sysctl_heap_stack_gap,
70339+ .maxlen = sizeof(sysctl_heap_stack_gap),
70340+ .mode = 0644,
70341+ .proc_handler = proc_doulongvec_minmax,
70342+ },
70343 #else
70344 {
70345 .procname = "nr_trim_pages",
70346@@ -1498,7 +1543,7 @@ static struct ctl_table fs_table[] = {
70347 .data = &suid_dumpable,
70348 .maxlen = sizeof(int),
70349 .mode = 0644,
70350- .proc_handler = proc_dointvec_minmax,
70351+ .proc_handler = proc_dointvec_minmax_coredump,
70352 .extra1 = &zero,
70353 .extra2 = &two,
70354 },
70355@@ -1645,6 +1690,16 @@ int proc_dostring(struct ctl_table *table, int write,
70356 buffer, lenp, ppos);
70357 }
70358
70359+int proc_dostring_modpriv(struct ctl_table *table, int write,
70360+ void __user *buffer, size_t *lenp, loff_t *ppos)
70361+{
70362+ if (write && !capable(CAP_SYS_MODULE))
70363+ return -EPERM;
70364+
70365+ return _proc_do_string(table->data, table->maxlen, write,
70366+ buffer, lenp, ppos);
70367+}
70368+
70369 static size_t proc_skip_spaces(char **buf)
70370 {
70371 size_t ret;
70372@@ -1750,6 +1805,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
70373 len = strlen(tmp);
70374 if (len > *size)
70375 len = *size;
70376+ if (len > sizeof(tmp))
70377+ len = sizeof(tmp);
70378 if (copy_to_user(*buf, tmp, len))
70379 return -EFAULT;
70380 *size -= len;
70381@@ -1942,7 +1999,6 @@ static int proc_taint(struct ctl_table *table, int write,
70382 return err;
70383 }
70384
70385-#ifdef CONFIG_PRINTK
70386 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
70387 void __user *buffer, size_t *lenp, loff_t *ppos)
70388 {
70389@@ -1951,7 +2007,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
70390
70391 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
70392 }
70393-#endif
70394
70395 struct do_proc_dointvec_minmax_conv_param {
70396 int *min;
70397@@ -2009,6 +2064,34 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
70398 do_proc_dointvec_minmax_conv, &param);
70399 }
70400
70401+static void validate_coredump_safety(void)
70402+{
70403+ if (suid_dumpable == SUID_DUMPABLE_SAFE &&
70404+ core_pattern[0] != '/' && core_pattern[0] != '|') {
70405+ printk(KERN_WARNING "Unsafe core_pattern used with "\
70406+ "suid_dumpable=2. Pipe handler or fully qualified "\
70407+ "core dump path required.\n");
70408+ }
70409+}
70410+
70411+static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
70412+ void __user *buffer, size_t *lenp, loff_t *ppos)
70413+{
70414+ int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
70415+ if (!error)
70416+ validate_coredump_safety();
70417+ return error;
70418+}
70419+
70420+static int proc_dostring_coredump(struct ctl_table *table, int write,
70421+ void __user *buffer, size_t *lenp, loff_t *ppos)
70422+{
70423+ int error = proc_dostring(table, write, buffer, lenp, ppos);
70424+ if (!error)
70425+ validate_coredump_safety();
70426+ return error;
70427+}
70428+
70429 static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write,
70430 void __user *buffer,
70431 size_t *lenp, loff_t *ppos,
70432@@ -2066,8 +2149,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
70433 *i = val;
70434 } else {
70435 val = convdiv * (*i) / convmul;
70436- if (!first)
70437+ if (!first) {
70438 err = proc_put_char(&buffer, &left, '\t');
70439+ if (err)
70440+ break;
70441+ }
70442 err = proc_put_long(&buffer, &left, val, false);
70443 if (err)
70444 break;
70445@@ -2459,6 +2545,12 @@ int proc_dostring(struct ctl_table *table, int write,
70446 return -ENOSYS;
70447 }
70448
70449+int proc_dostring_modpriv(struct ctl_table *table, int write,
70450+ void __user *buffer, size_t *lenp, loff_t *ppos)
70451+{
70452+ return -ENOSYS;
70453+}
70454+
70455 int proc_dointvec(struct ctl_table *table, int write,
70456 void __user *buffer, size_t *lenp, loff_t *ppos)
70457 {
70458@@ -2515,5 +2607,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
70459 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
70460 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
70461 EXPORT_SYMBOL(proc_dostring);
70462+EXPORT_SYMBOL(proc_dostring_modpriv);
70463 EXPORT_SYMBOL(proc_doulongvec_minmax);
70464 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
70465diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
70466index a650694..aaeeb20 100644
70467--- a/kernel/sysctl_binary.c
70468+++ b/kernel/sysctl_binary.c
70469@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
70470 int i;
70471
70472 set_fs(KERNEL_DS);
70473- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
70474+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
70475 set_fs(old_fs);
70476 if (result < 0)
70477 goto out_kfree;
70478@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
70479 }
70480
70481 set_fs(KERNEL_DS);
70482- result = vfs_write(file, buffer, str - buffer, &pos);
70483+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
70484 set_fs(old_fs);
70485 if (result < 0)
70486 goto out_kfree;
70487@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
70488 int i;
70489
70490 set_fs(KERNEL_DS);
70491- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
70492+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
70493 set_fs(old_fs);
70494 if (result < 0)
70495 goto out_kfree;
70496@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
70497 }
70498
70499 set_fs(KERNEL_DS);
70500- result = vfs_write(file, buffer, str - buffer, &pos);
70501+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
70502 set_fs(old_fs);
70503 if (result < 0)
70504 goto out_kfree;
70505@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
70506 int i;
70507
70508 set_fs(KERNEL_DS);
70509- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
70510+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
70511 set_fs(old_fs);
70512 if (result < 0)
70513 goto out;
70514@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
70515 __le16 dnaddr;
70516
70517 set_fs(KERNEL_DS);
70518- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
70519+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
70520 set_fs(old_fs);
70521 if (result < 0)
70522 goto out;
70523@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
70524 le16_to_cpu(dnaddr) & 0x3ff);
70525
70526 set_fs(KERNEL_DS);
70527- result = vfs_write(file, buf, len, &pos);
70528+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
70529 set_fs(old_fs);
70530 if (result < 0)
70531 goto out;
70532diff --git a/kernel/taskstats.c b/kernel/taskstats.c
70533index e660464..c8b9e67 100644
70534--- a/kernel/taskstats.c
70535+++ b/kernel/taskstats.c
70536@@ -27,9 +27,12 @@
70537 #include <linux/cgroup.h>
70538 #include <linux/fs.h>
70539 #include <linux/file.h>
70540+#include <linux/grsecurity.h>
70541 #include <net/genetlink.h>
70542 #include <linux/atomic.h>
70543
70544+extern int gr_is_taskstats_denied(int pid);
70545+
70546 /*
70547 * Maximum length of a cpumask that can be specified in
70548 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
70549@@ -556,6 +559,9 @@ err:
70550
70551 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
70552 {
70553+ if (gr_is_taskstats_denied(current->pid))
70554+ return -EACCES;
70555+
70556 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
70557 return cmd_attr_register_cpumask(info);
70558 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
70559diff --git a/kernel/time.c b/kernel/time.c
70560index ba744cf..267b7c5 100644
70561--- a/kernel/time.c
70562+++ b/kernel/time.c
70563@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
70564 return error;
70565
70566 if (tz) {
70567+ /* we log in do_settimeofday called below, so don't log twice
70568+ */
70569+ if (!tv)
70570+ gr_log_timechange();
70571+
70572 sys_tz = *tz;
70573 update_vsyscall_tz();
70574 if (firsttime) {
70575diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
70576index aa27d39..34d221c 100644
70577--- a/kernel/time/alarmtimer.c
70578+++ b/kernel/time/alarmtimer.c
70579@@ -779,7 +779,7 @@ static int __init alarmtimer_init(void)
70580 struct platform_device *pdev;
70581 int error = 0;
70582 int i;
70583- struct k_clock alarm_clock = {
70584+ static struct k_clock alarm_clock = {
70585 .clock_getres = alarm_clock_getres,
70586 .clock_get = alarm_clock_get,
70587 .timer_create = alarm_timer_create,
70588diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
70589index f113755..ec24223 100644
70590--- a/kernel/time/tick-broadcast.c
70591+++ b/kernel/time/tick-broadcast.c
70592@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
70593 * then clear the broadcast bit.
70594 */
70595 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
70596- int cpu = smp_processor_id();
70597+ cpu = smp_processor_id();
70598
70599 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
70600 tick_broadcast_clear_oneshot(cpu);
70601diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
70602index 3447cfa..291806b 100644
70603--- a/kernel/time/timekeeping.c
70604+++ b/kernel/time/timekeeping.c
70605@@ -14,6 +14,7 @@
70606 #include <linux/init.h>
70607 #include <linux/mm.h>
70608 #include <linux/sched.h>
70609+#include <linux/grsecurity.h>
70610 #include <linux/syscore_ops.h>
70611 #include <linux/clocksource.h>
70612 #include <linux/jiffies.h>
70613@@ -387,6 +388,8 @@ int do_settimeofday(const struct timespec *tv)
70614 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
70615 return -EINVAL;
70616
70617+ gr_log_timechange();
70618+
70619 write_seqlock_irqsave(&timekeeper.lock, flags);
70620
70621 timekeeping_forward_now();
70622diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
70623index 3258455..f35227d 100644
70624--- a/kernel/time/timer_list.c
70625+++ b/kernel/time/timer_list.c
70626@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
70627
70628 static void print_name_offset(struct seq_file *m, void *sym)
70629 {
70630+#ifdef CONFIG_GRKERNSEC_HIDESYM
70631+ SEQ_printf(m, "<%p>", NULL);
70632+#else
70633 char symname[KSYM_NAME_LEN];
70634
70635 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
70636 SEQ_printf(m, "<%pK>", sym);
70637 else
70638 SEQ_printf(m, "%s", symname);
70639+#endif
70640 }
70641
70642 static void
70643@@ -112,7 +116,11 @@ next_one:
70644 static void
70645 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
70646 {
70647+#ifdef CONFIG_GRKERNSEC_HIDESYM
70648+ SEQ_printf(m, " .base: %p\n", NULL);
70649+#else
70650 SEQ_printf(m, " .base: %pK\n", base);
70651+#endif
70652 SEQ_printf(m, " .index: %d\n",
70653 base->index);
70654 SEQ_printf(m, " .resolution: %Lu nsecs\n",
70655@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
70656 {
70657 struct proc_dir_entry *pe;
70658
70659+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70660+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
70661+#else
70662 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
70663+#endif
70664 if (!pe)
70665 return -ENOMEM;
70666 return 0;
70667diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
70668index 0b537f2..9e71eca 100644
70669--- a/kernel/time/timer_stats.c
70670+++ b/kernel/time/timer_stats.c
70671@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
70672 static unsigned long nr_entries;
70673 static struct entry entries[MAX_ENTRIES];
70674
70675-static atomic_t overflow_count;
70676+static atomic_unchecked_t overflow_count;
70677
70678 /*
70679 * The entries are in a hash-table, for fast lookup:
70680@@ -140,7 +140,7 @@ static void reset_entries(void)
70681 nr_entries = 0;
70682 memset(entries, 0, sizeof(entries));
70683 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
70684- atomic_set(&overflow_count, 0);
70685+ atomic_set_unchecked(&overflow_count, 0);
70686 }
70687
70688 static struct entry *alloc_entry(void)
70689@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
70690 if (likely(entry))
70691 entry->count++;
70692 else
70693- atomic_inc(&overflow_count);
70694+ atomic_inc_unchecked(&overflow_count);
70695
70696 out_unlock:
70697 raw_spin_unlock_irqrestore(lock, flags);
70698@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
70699
70700 static void print_name_offset(struct seq_file *m, unsigned long addr)
70701 {
70702+#ifdef CONFIG_GRKERNSEC_HIDESYM
70703+ seq_printf(m, "<%p>", NULL);
70704+#else
70705 char symname[KSYM_NAME_LEN];
70706
70707 if (lookup_symbol_name(addr, symname) < 0)
70708 seq_printf(m, "<%p>", (void *)addr);
70709 else
70710 seq_printf(m, "%s", symname);
70711+#endif
70712 }
70713
70714 static int tstats_show(struct seq_file *m, void *v)
70715@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
70716
70717 seq_puts(m, "Timer Stats Version: v0.2\n");
70718 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
70719- if (atomic_read(&overflow_count))
70720+ if (atomic_read_unchecked(&overflow_count))
70721 seq_printf(m, "Overflow: %d entries\n",
70722- atomic_read(&overflow_count));
70723+ atomic_read_unchecked(&overflow_count));
70724
70725 for (i = 0; i < nr_entries; i++) {
70726 entry = entries + i;
70727@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
70728 {
70729 struct proc_dir_entry *pe;
70730
70731+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70732+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
70733+#else
70734 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
70735+#endif
70736 if (!pe)
70737 return -ENOMEM;
70738 return 0;
70739diff --git a/kernel/timer.c b/kernel/timer.c
70740index 6ec7e7e..cbc448b 100644
70741--- a/kernel/timer.c
70742+++ b/kernel/timer.c
70743@@ -1362,7 +1362,7 @@ void update_process_times(int user_tick)
70744 /*
70745 * This function runs timers and the timer-tq in bottom half context.
70746 */
70747-static void run_timer_softirq(struct softirq_action *h)
70748+static void run_timer_softirq(void)
70749 {
70750 struct tvec_base *base = __this_cpu_read(tvec_bases);
70751
70752diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
70753index c0bd030..62a1927 100644
70754--- a/kernel/trace/blktrace.c
70755+++ b/kernel/trace/blktrace.c
70756@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
70757 struct blk_trace *bt = filp->private_data;
70758 char buf[16];
70759
70760- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
70761+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
70762
70763 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
70764 }
70765@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
70766 return 1;
70767
70768 bt = buf->chan->private_data;
70769- atomic_inc(&bt->dropped);
70770+ atomic_inc_unchecked(&bt->dropped);
70771 return 0;
70772 }
70773
70774@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
70775
70776 bt->dir = dir;
70777 bt->dev = dev;
70778- atomic_set(&bt->dropped, 0);
70779+ atomic_set_unchecked(&bt->dropped, 0);
70780
70781 ret = -EIO;
70782 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
70783diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
70784index a008663..30d7429 100644
70785--- a/kernel/trace/ftrace.c
70786+++ b/kernel/trace/ftrace.c
70787@@ -1785,12 +1785,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
70788 if (unlikely(ftrace_disabled))
70789 return 0;
70790
70791+ ret = ftrace_arch_code_modify_prepare();
70792+ FTRACE_WARN_ON(ret);
70793+ if (ret)
70794+ return 0;
70795+
70796 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
70797+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
70798 if (ret) {
70799 ftrace_bug(ret, ip);
70800- return 0;
70801 }
70802- return 1;
70803+ return ret ? 0 : 1;
70804 }
70805
70806 /*
70807@@ -2885,7 +2890,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
70808
70809 int
70810 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
70811- void *data)
70812+ void *data)
70813 {
70814 struct ftrace_func_probe *entry;
70815 struct ftrace_page *pg;
70816@@ -3697,8 +3702,10 @@ static int ftrace_process_locs(struct module *mod,
70817 if (!count)
70818 return 0;
70819
70820+ pax_open_kernel();
70821 sort(start, count, sizeof(*start),
70822 ftrace_cmp_ips, ftrace_swap_ips);
70823+ pax_close_kernel();
70824
70825 start_pg = ftrace_allocate_pages(count);
70826 if (!start_pg)
70827diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
70828index a7fa070..403bc8d 100644
70829--- a/kernel/trace/trace.c
70830+++ b/kernel/trace/trace.c
70831@@ -4421,10 +4421,9 @@ static const struct file_operations tracing_dyn_info_fops = {
70832 };
70833 #endif
70834
70835-static struct dentry *d_tracer;
70836-
70837 struct dentry *tracing_init_dentry(void)
70838 {
70839+ static struct dentry *d_tracer;
70840 static int once;
70841
70842 if (d_tracer)
70843@@ -4444,10 +4443,9 @@ struct dentry *tracing_init_dentry(void)
70844 return d_tracer;
70845 }
70846
70847-static struct dentry *d_percpu;
70848-
70849 struct dentry *tracing_dentry_percpu(void)
70850 {
70851+ static struct dentry *d_percpu;
70852 static int once;
70853 struct dentry *d_tracer;
70854
70855diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
70856index 29111da..d190fe2 100644
70857--- a/kernel/trace/trace_events.c
70858+++ b/kernel/trace/trace_events.c
70859@@ -1308,10 +1308,6 @@ static LIST_HEAD(ftrace_module_file_list);
70860 struct ftrace_module_file_ops {
70861 struct list_head list;
70862 struct module *mod;
70863- struct file_operations id;
70864- struct file_operations enable;
70865- struct file_operations format;
70866- struct file_operations filter;
70867 };
70868
70869 static struct ftrace_module_file_ops *
70870@@ -1332,17 +1328,12 @@ trace_create_file_ops(struct module *mod)
70871
70872 file_ops->mod = mod;
70873
70874- file_ops->id = ftrace_event_id_fops;
70875- file_ops->id.owner = mod;
70876-
70877- file_ops->enable = ftrace_enable_fops;
70878- file_ops->enable.owner = mod;
70879-
70880- file_ops->filter = ftrace_event_filter_fops;
70881- file_ops->filter.owner = mod;
70882-
70883- file_ops->format = ftrace_event_format_fops;
70884- file_ops->format.owner = mod;
70885+ pax_open_kernel();
70886+ *(void **)&mod->trace_id.owner = mod;
70887+ *(void **)&mod->trace_enable.owner = mod;
70888+ *(void **)&mod->trace_filter.owner = mod;
70889+ *(void **)&mod->trace_format.owner = mod;
70890+ pax_close_kernel();
70891
70892 list_add(&file_ops->list, &ftrace_module_file_list);
70893
70894@@ -1366,8 +1357,8 @@ static void trace_module_add_events(struct module *mod)
70895
70896 for_each_event(call, start, end) {
70897 __trace_add_event_call(*call, mod,
70898- &file_ops->id, &file_ops->enable,
70899- &file_ops->filter, &file_ops->format);
70900+ &mod->trace_id, &mod->trace_enable,
70901+ &mod->trace_filter, &mod->trace_format);
70902 }
70903 }
70904
70905diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
70906index fd3c8aa..5f324a6 100644
70907--- a/kernel/trace/trace_mmiotrace.c
70908+++ b/kernel/trace/trace_mmiotrace.c
70909@@ -24,7 +24,7 @@ struct header_iter {
70910 static struct trace_array *mmio_trace_array;
70911 static bool overrun_detected;
70912 static unsigned long prev_overruns;
70913-static atomic_t dropped_count;
70914+static atomic_unchecked_t dropped_count;
70915
70916 static void mmio_reset_data(struct trace_array *tr)
70917 {
70918@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
70919
70920 static unsigned long count_overruns(struct trace_iterator *iter)
70921 {
70922- unsigned long cnt = atomic_xchg(&dropped_count, 0);
70923+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
70924 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
70925
70926 if (over > prev_overruns)
70927@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
70928 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
70929 sizeof(*entry), 0, pc);
70930 if (!event) {
70931- atomic_inc(&dropped_count);
70932+ atomic_inc_unchecked(&dropped_count);
70933 return;
70934 }
70935 entry = ring_buffer_event_data(event);
70936@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
70937 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
70938 sizeof(*entry), 0, pc);
70939 if (!event) {
70940- atomic_inc(&dropped_count);
70941+ atomic_inc_unchecked(&dropped_count);
70942 return;
70943 }
70944 entry = ring_buffer_event_data(event);
70945diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
70946index df611a0..10d8b32 100644
70947--- a/kernel/trace/trace_output.c
70948+++ b/kernel/trace/trace_output.c
70949@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
70950
70951 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
70952 if (!IS_ERR(p)) {
70953- p = mangle_path(s->buffer + s->len, p, "\n");
70954+ p = mangle_path(s->buffer + s->len, p, "\n\\");
70955 if (p) {
70956 s->len = p - s->buffer;
70957 return 1;
70958diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
70959index d4545f4..a9010a1 100644
70960--- a/kernel/trace/trace_stack.c
70961+++ b/kernel/trace/trace_stack.c
70962@@ -53,7 +53,7 @@ static inline void check_stack(void)
70963 return;
70964
70965 /* we do not handle interrupt stacks yet */
70966- if (!object_is_on_stack(&this_size))
70967+ if (!object_starts_on_stack(&this_size))
70968 return;
70969
70970 local_irq_save(flags);
70971diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
70972index ff5bdee..3eaeba6 100644
70973--- a/lib/Kconfig.debug
70974+++ b/lib/Kconfig.debug
70975@@ -1165,6 +1165,7 @@ config LATENCYTOP
70976 depends on DEBUG_KERNEL
70977 depends on STACKTRACE_SUPPORT
70978 depends on PROC_FS
70979+ depends on !GRKERNSEC_HIDESYM
70980 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
70981 select KALLSYMS
70982 select KALLSYMS_ALL
70983diff --git a/lib/bitmap.c b/lib/bitmap.c
70984index 06fdfa1..97c5c7d 100644
70985--- a/lib/bitmap.c
70986+++ b/lib/bitmap.c
70987@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
70988 {
70989 int c, old_c, totaldigits, ndigits, nchunks, nbits;
70990 u32 chunk;
70991- const char __user __force *ubuf = (const char __user __force *)buf;
70992+ const char __user *ubuf = (const char __force_user *)buf;
70993
70994 bitmap_zero(maskp, nmaskbits);
70995
70996@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
70997 {
70998 if (!access_ok(VERIFY_READ, ubuf, ulen))
70999 return -EFAULT;
71000- return __bitmap_parse((const char __force *)ubuf,
71001+ return __bitmap_parse((const char __force_kernel *)ubuf,
71002 ulen, 1, maskp, nmaskbits);
71003
71004 }
71005@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
71006 {
71007 unsigned a, b;
71008 int c, old_c, totaldigits;
71009- const char __user __force *ubuf = (const char __user __force *)buf;
71010+ const char __user *ubuf = (const char __force_user *)buf;
71011 int exp_digit, in_range;
71012
71013 totaldigits = c = 0;
71014@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
71015 {
71016 if (!access_ok(VERIFY_READ, ubuf, ulen))
71017 return -EFAULT;
71018- return __bitmap_parselist((const char __force *)ubuf,
71019+ return __bitmap_parselist((const char __force_kernel *)ubuf,
71020 ulen, 1, maskp, nmaskbits);
71021 }
71022 EXPORT_SYMBOL(bitmap_parselist_user);
71023diff --git a/lib/bug.c b/lib/bug.c
71024index a28c141..2bd3d95 100644
71025--- a/lib/bug.c
71026+++ b/lib/bug.c
71027@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
71028 return BUG_TRAP_TYPE_NONE;
71029
71030 bug = find_bug(bugaddr);
71031+ if (!bug)
71032+ return BUG_TRAP_TYPE_NONE;
71033
71034 file = NULL;
71035 line = 0;
71036diff --git a/lib/debugobjects.c b/lib/debugobjects.c
71037index d11808c..dc2d6f8 100644
71038--- a/lib/debugobjects.c
71039+++ b/lib/debugobjects.c
71040@@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
71041 if (limit > 4)
71042 return;
71043
71044- is_on_stack = object_is_on_stack(addr);
71045+ is_on_stack = object_starts_on_stack(addr);
71046 if (is_on_stack == onstack)
71047 return;
71048
71049diff --git a/lib/devres.c b/lib/devres.c
71050index 80b9c76..9e32279 100644
71051--- a/lib/devres.c
71052+++ b/lib/devres.c
71053@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
71054 void devm_iounmap(struct device *dev, void __iomem *addr)
71055 {
71056 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
71057- (void *)addr));
71058+ (void __force *)addr));
71059 iounmap(addr);
71060 }
71061 EXPORT_SYMBOL(devm_iounmap);
71062@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
71063 {
71064 ioport_unmap(addr);
71065 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
71066- devm_ioport_map_match, (void *)addr));
71067+ devm_ioport_map_match, (void __force *)addr));
71068 }
71069 EXPORT_SYMBOL(devm_ioport_unmap);
71070
71071diff --git a/lib/digsig.c b/lib/digsig.c
71072index 286d558..8c0e629 100644
71073--- a/lib/digsig.c
71074+++ b/lib/digsig.c
71075@@ -163,9 +163,11 @@ static int digsig_verify_rsa(struct key *key,
71076 memcpy(out1 + head, p, l);
71077
71078 err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
71079+ if (err)
71080+ goto err;
71081
71082- if (!err && len == hlen)
71083- err = memcmp(out2, h, hlen);
71084+ if (len != hlen || memcmp(out2, h, hlen))
71085+ err = -EINVAL;
71086
71087 err:
71088 mpi_free(in);
71089diff --git a/lib/dma-debug.c b/lib/dma-debug.c
71090index 66ce414..6f0a0dd 100644
71091--- a/lib/dma-debug.c
71092+++ b/lib/dma-debug.c
71093@@ -924,7 +924,7 @@ out:
71094
71095 static void check_for_stack(struct device *dev, void *addr)
71096 {
71097- if (object_is_on_stack(addr))
71098+ if (object_starts_on_stack(addr))
71099 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
71100 "stack [addr=%p]\n", addr);
71101 }
71102diff --git a/lib/inflate.c b/lib/inflate.c
71103index 013a761..c28f3fc 100644
71104--- a/lib/inflate.c
71105+++ b/lib/inflate.c
71106@@ -269,7 +269,7 @@ static void free(void *where)
71107 malloc_ptr = free_mem_ptr;
71108 }
71109 #else
71110-#define malloc(a) kmalloc(a, GFP_KERNEL)
71111+#define malloc(a) kmalloc((a), GFP_KERNEL)
71112 #define free(a) kfree(a)
71113 #endif
71114
71115diff --git a/lib/ioremap.c b/lib/ioremap.c
71116index 0c9216c..863bd89 100644
71117--- a/lib/ioremap.c
71118+++ b/lib/ioremap.c
71119@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
71120 unsigned long next;
71121
71122 phys_addr -= addr;
71123- pmd = pmd_alloc(&init_mm, pud, addr);
71124+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
71125 if (!pmd)
71126 return -ENOMEM;
71127 do {
71128@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
71129 unsigned long next;
71130
71131 phys_addr -= addr;
71132- pud = pud_alloc(&init_mm, pgd, addr);
71133+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
71134 if (!pud)
71135 return -ENOMEM;
71136 do {
71137diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
71138index bd2bea9..6b3c95e 100644
71139--- a/lib/is_single_threaded.c
71140+++ b/lib/is_single_threaded.c
71141@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
71142 struct task_struct *p, *t;
71143 bool ret;
71144
71145+ if (!mm)
71146+ return true;
71147+
71148 if (atomic_read(&task->signal->live) != 1)
71149 return false;
71150
71151diff --git a/lib/radix-tree.c b/lib/radix-tree.c
71152index e796429..6e38f9f 100644
71153--- a/lib/radix-tree.c
71154+++ b/lib/radix-tree.c
71155@@ -92,7 +92,7 @@ struct radix_tree_preload {
71156 int nr;
71157 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
71158 };
71159-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
71160+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
71161
71162 static inline void *ptr_to_indirect(void *ptr)
71163 {
71164diff --git a/lib/vsprintf.c b/lib/vsprintf.c
71165index 598a73e..5c5aeb5 100644
71166--- a/lib/vsprintf.c
71167+++ b/lib/vsprintf.c
71168@@ -16,6 +16,9 @@
71169 * - scnprintf and vscnprintf
71170 */
71171
71172+#ifdef CONFIG_GRKERNSEC_HIDESYM
71173+#define __INCLUDED_BY_HIDESYM 1
71174+#endif
71175 #include <stdarg.h>
71176 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
71177 #include <linux/types.h>
71178@@ -536,7 +539,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
71179 char sym[KSYM_SYMBOL_LEN];
71180 if (ext == 'B')
71181 sprint_backtrace(sym, value);
71182- else if (ext != 'f' && ext != 's')
71183+ else if (ext != 'f' && ext != 's' && ext != 'a')
71184 sprint_symbol(sym, value);
71185 else
71186 sprint_symbol_no_offset(sym, value);
71187@@ -912,7 +915,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
71188 return number(buf, end, *(const netdev_features_t *)addr, spec);
71189 }
71190
71191+#ifdef CONFIG_GRKERNSEC_HIDESYM
71192+int kptr_restrict __read_mostly = 2;
71193+#else
71194 int kptr_restrict __read_mostly;
71195+#endif
71196
71197 /*
71198 * Show a '%p' thing. A kernel extension is that the '%p' is followed
71199@@ -926,6 +933,8 @@ int kptr_restrict __read_mostly;
71200 * - 'S' For symbolic direct pointers with offset
71201 * - 's' For symbolic direct pointers without offset
71202 * - 'B' For backtraced symbolic direct pointers with offset
71203+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
71204+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
71205 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
71206 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
71207 * - 'M' For a 6-byte MAC address, it prints the address in the
71208@@ -973,12 +982,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
71209
71210 if (!ptr && *fmt != 'K') {
71211 /*
71212- * Print (null) with the same width as a pointer so it makes
71213+ * Print (nil) with the same width as a pointer so it makes
71214 * tabular output look nice.
71215 */
71216 if (spec.field_width == -1)
71217 spec.field_width = default_width;
71218- return string(buf, end, "(null)", spec);
71219+ return string(buf, end, "(nil)", spec);
71220 }
71221
71222 switch (*fmt) {
71223@@ -988,6 +997,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
71224 /* Fallthrough */
71225 case 'S':
71226 case 's':
71227+#ifdef CONFIG_GRKERNSEC_HIDESYM
71228+ break;
71229+#else
71230+ return symbol_string(buf, end, ptr, spec, *fmt);
71231+#endif
71232+ case 'A':
71233+ case 'a':
71234 case 'B':
71235 return symbol_string(buf, end, ptr, spec, *fmt);
71236 case 'R':
71237@@ -1025,6 +1041,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
71238 va_end(va);
71239 return buf;
71240 }
71241+ case 'P':
71242+ break;
71243 case 'K':
71244 /*
71245 * %pK cannot be used in IRQ context because its test
71246@@ -1048,6 +1066,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
71247 }
71248 break;
71249 }
71250+
71251+#ifdef CONFIG_GRKERNSEC_HIDESYM
71252+ /* 'P' = approved pointers to copy to userland,
71253+ as in the /proc/kallsyms case, as we make it display nothing
71254+ for non-root users, and the real contents for root users
71255+ Also ignore 'K' pointers, since we force their NULLing for non-root users
71256+ above
71257+ */
71258+ if (ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
71259+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
71260+ dump_stack();
71261+ ptr = NULL;
71262+ }
71263+#endif
71264+
71265 spec.flags |= SMALL;
71266 if (spec.field_width == -1) {
71267 spec.field_width = default_width;
71268@@ -1759,11 +1792,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
71269 typeof(type) value; \
71270 if (sizeof(type) == 8) { \
71271 args = PTR_ALIGN(args, sizeof(u32)); \
71272- *(u32 *)&value = *(u32 *)args; \
71273- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
71274+ *(u32 *)&value = *(const u32 *)args; \
71275+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
71276 } else { \
71277 args = PTR_ALIGN(args, sizeof(type)); \
71278- value = *(typeof(type) *)args; \
71279+ value = *(const typeof(type) *)args; \
71280 } \
71281 args += sizeof(type); \
71282 value; \
71283@@ -1826,7 +1859,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
71284 case FORMAT_TYPE_STR: {
71285 const char *str_arg = args;
71286 args += strlen(str_arg) + 1;
71287- str = string(str, end, (char *)str_arg, spec);
71288+ str = string(str, end, str_arg, spec);
71289 break;
71290 }
71291
71292diff --git a/localversion-grsec b/localversion-grsec
71293new file mode 100644
71294index 0000000..7cd6065
71295--- /dev/null
71296+++ b/localversion-grsec
71297@@ -0,0 +1 @@
71298+-grsec
71299diff --git a/mm/Kconfig b/mm/Kconfig
71300index 82fed4e..979e814 100644
71301--- a/mm/Kconfig
71302+++ b/mm/Kconfig
71303@@ -247,10 +247,10 @@ config KSM
71304 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
71305
71306 config DEFAULT_MMAP_MIN_ADDR
71307- int "Low address space to protect from user allocation"
71308+ int "Low address space to protect from user allocation"
71309 depends on MMU
71310- default 4096
71311- help
71312+ default 65536
71313+ help
71314 This is the portion of low virtual memory which should be protected
71315 from userspace allocation. Keeping a user from writing to low pages
71316 can help reduce the impact of kernel NULL pointer bugs.
71317@@ -280,7 +280,7 @@ config MEMORY_FAILURE
71318
71319 config HWPOISON_INJECT
71320 tristate "HWPoison pages injector"
71321- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
71322+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
71323 select PROC_PAGE_MONITOR
71324
71325 config NOMMU_INITIAL_TRIM_EXCESS
71326diff --git a/mm/filemap.c b/mm/filemap.c
71327index a4a5260..6151dc5 100644
71328--- a/mm/filemap.c
71329+++ b/mm/filemap.c
71330@@ -1723,7 +1723,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
71331 struct address_space *mapping = file->f_mapping;
71332
71333 if (!mapping->a_ops->readpage)
71334- return -ENOEXEC;
71335+ return -ENODEV;
71336 file_accessed(file);
71337 vma->vm_ops = &generic_file_vm_ops;
71338 vma->vm_flags |= VM_CAN_NONLINEAR;
71339@@ -2064,6 +2064,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
71340 *pos = i_size_read(inode);
71341
71342 if (limit != RLIM_INFINITY) {
71343+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
71344 if (*pos >= limit) {
71345 send_sig(SIGXFSZ, current, 0);
71346 return -EFBIG;
71347diff --git a/mm/fremap.c b/mm/fremap.c
71348index 9ed4fd4..c42648d 100644
71349--- a/mm/fremap.c
71350+++ b/mm/fremap.c
71351@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
71352 retry:
71353 vma = find_vma(mm, start);
71354
71355+#ifdef CONFIG_PAX_SEGMEXEC
71356+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
71357+ goto out;
71358+#endif
71359+
71360 /*
71361 * Make sure the vma is shared, that it supports prefaulting,
71362 * and that the remapped range is valid and fully within
71363diff --git a/mm/highmem.c b/mm/highmem.c
71364index 57d82c6..e9e0552 100644
71365--- a/mm/highmem.c
71366+++ b/mm/highmem.c
71367@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
71368 * So no dangers, even with speculative execution.
71369 */
71370 page = pte_page(pkmap_page_table[i]);
71371+ pax_open_kernel();
71372 pte_clear(&init_mm, (unsigned long)page_address(page),
71373 &pkmap_page_table[i]);
71374-
71375+ pax_close_kernel();
71376 set_page_address(page, NULL);
71377 need_flush = 1;
71378 }
71379@@ -186,9 +187,11 @@ start:
71380 }
71381 }
71382 vaddr = PKMAP_ADDR(last_pkmap_nr);
71383+
71384+ pax_open_kernel();
71385 set_pte_at(&init_mm, vaddr,
71386 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
71387-
71388+ pax_close_kernel();
71389 pkmap_count[last_pkmap_nr] = 1;
71390 set_page_address(page, (void *)vaddr);
71391
71392diff --git a/mm/huge_memory.c b/mm/huge_memory.c
71393index 57c4b93..24b8f59 100644
71394--- a/mm/huge_memory.c
71395+++ b/mm/huge_memory.c
71396@@ -735,7 +735,7 @@ out:
71397 * run pte_offset_map on the pmd, if an huge pmd could
71398 * materialize from under us from a different thread.
71399 */
71400- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
71401+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
71402 return VM_FAULT_OOM;
71403 /* if an huge pmd materialized from under us just retry later */
71404 if (unlikely(pmd_trans_huge(*pmd)))
71405diff --git a/mm/hugetlb.c b/mm/hugetlb.c
71406index 19558df..f7743b3 100644
71407--- a/mm/hugetlb.c
71408+++ b/mm/hugetlb.c
71409@@ -2463,6 +2463,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
71410 return 1;
71411 }
71412
71413+#ifdef CONFIG_PAX_SEGMEXEC
71414+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
71415+{
71416+ struct mm_struct *mm = vma->vm_mm;
71417+ struct vm_area_struct *vma_m;
71418+ unsigned long address_m;
71419+ pte_t *ptep_m;
71420+
71421+ vma_m = pax_find_mirror_vma(vma);
71422+ if (!vma_m)
71423+ return;
71424+
71425+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71426+ address_m = address + SEGMEXEC_TASK_SIZE;
71427+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
71428+ get_page(page_m);
71429+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
71430+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
71431+}
71432+#endif
71433+
71434 /*
71435 * Hugetlb_cow() should be called with page lock of the original hugepage held.
71436 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
71437@@ -2575,6 +2596,11 @@ retry_avoidcopy:
71438 make_huge_pte(vma, new_page, 1));
71439 page_remove_rmap(old_page);
71440 hugepage_add_new_anon_rmap(new_page, vma, address);
71441+
71442+#ifdef CONFIG_PAX_SEGMEXEC
71443+ pax_mirror_huge_pte(vma, address, new_page);
71444+#endif
71445+
71446 /* Make the old page be freed below */
71447 new_page = old_page;
71448 mmu_notifier_invalidate_range_end(mm,
71449@@ -2729,6 +2755,10 @@ retry:
71450 && (vma->vm_flags & VM_SHARED)));
71451 set_huge_pte_at(mm, address, ptep, new_pte);
71452
71453+#ifdef CONFIG_PAX_SEGMEXEC
71454+ pax_mirror_huge_pte(vma, address, page);
71455+#endif
71456+
71457 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
71458 /* Optimization, do the COW without a second fault */
71459 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
71460@@ -2758,6 +2788,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71461 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
71462 struct hstate *h = hstate_vma(vma);
71463
71464+#ifdef CONFIG_PAX_SEGMEXEC
71465+ struct vm_area_struct *vma_m;
71466+#endif
71467+
71468 address &= huge_page_mask(h);
71469
71470 ptep = huge_pte_offset(mm, address);
71471@@ -2771,6 +2805,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71472 VM_FAULT_SET_HINDEX(h - hstates);
71473 }
71474
71475+#ifdef CONFIG_PAX_SEGMEXEC
71476+ vma_m = pax_find_mirror_vma(vma);
71477+ if (vma_m) {
71478+ unsigned long address_m;
71479+
71480+ if (vma->vm_start > vma_m->vm_start) {
71481+ address_m = address;
71482+ address -= SEGMEXEC_TASK_SIZE;
71483+ vma = vma_m;
71484+ h = hstate_vma(vma);
71485+ } else
71486+ address_m = address + SEGMEXEC_TASK_SIZE;
71487+
71488+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
71489+ return VM_FAULT_OOM;
71490+ address_m &= HPAGE_MASK;
71491+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
71492+ }
71493+#endif
71494+
71495 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
71496 if (!ptep)
71497 return VM_FAULT_OOM;
71498diff --git a/mm/internal.h b/mm/internal.h
71499index 8052379..47029d1 100644
71500--- a/mm/internal.h
71501+++ b/mm/internal.h
71502@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
71503 * in mm/page_alloc.c
71504 */
71505 extern void __free_pages_bootmem(struct page *page, unsigned int order);
71506+extern void free_compound_page(struct page *page);
71507 extern void prep_compound_page(struct page *page, unsigned long order);
71508 #ifdef CONFIG_MEMORY_FAILURE
71509 extern bool is_free_buddy_page(struct page *page);
71510diff --git a/mm/kmemleak.c b/mm/kmemleak.c
71511index 45eb621..6ccd8ea 100644
71512--- a/mm/kmemleak.c
71513+++ b/mm/kmemleak.c
71514@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
71515
71516 for (i = 0; i < object->trace_len; i++) {
71517 void *ptr = (void *)object->trace[i];
71518- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
71519+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
71520 }
71521 }
71522
71523diff --git a/mm/maccess.c b/mm/maccess.c
71524index d53adf9..03a24bf 100644
71525--- a/mm/maccess.c
71526+++ b/mm/maccess.c
71527@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
71528 set_fs(KERNEL_DS);
71529 pagefault_disable();
71530 ret = __copy_from_user_inatomic(dst,
71531- (__force const void __user *)src, size);
71532+ (const void __force_user *)src, size);
71533 pagefault_enable();
71534 set_fs(old_fs);
71535
71536@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
71537
71538 set_fs(KERNEL_DS);
71539 pagefault_disable();
71540- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
71541+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
71542 pagefault_enable();
71543 set_fs(old_fs);
71544
71545diff --git a/mm/madvise.c b/mm/madvise.c
71546index 14d260f..b2a80fd 100644
71547--- a/mm/madvise.c
71548+++ b/mm/madvise.c
71549@@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
71550 pgoff_t pgoff;
71551 unsigned long new_flags = vma->vm_flags;
71552
71553+#ifdef CONFIG_PAX_SEGMEXEC
71554+ struct vm_area_struct *vma_m;
71555+#endif
71556+
71557 switch (behavior) {
71558 case MADV_NORMAL:
71559 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
71560@@ -119,6 +123,13 @@ success:
71561 /*
71562 * vm_flags is protected by the mmap_sem held in write mode.
71563 */
71564+
71565+#ifdef CONFIG_PAX_SEGMEXEC
71566+ vma_m = pax_find_mirror_vma(vma);
71567+ if (vma_m)
71568+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
71569+#endif
71570+
71571 vma->vm_flags = new_flags;
71572
71573 out:
71574@@ -177,6 +188,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
71575 struct vm_area_struct ** prev,
71576 unsigned long start, unsigned long end)
71577 {
71578+
71579+#ifdef CONFIG_PAX_SEGMEXEC
71580+ struct vm_area_struct *vma_m;
71581+#endif
71582+
71583 *prev = vma;
71584 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
71585 return -EINVAL;
71586@@ -189,6 +205,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
71587 zap_page_range(vma, start, end - start, &details);
71588 } else
71589 zap_page_range(vma, start, end - start, NULL);
71590+
71591+#ifdef CONFIG_PAX_SEGMEXEC
71592+ vma_m = pax_find_mirror_vma(vma);
71593+ if (vma_m) {
71594+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
71595+ struct zap_details details = {
71596+ .nonlinear_vma = vma_m,
71597+ .last_index = ULONG_MAX,
71598+ };
71599+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
71600+ } else
71601+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
71602+ }
71603+#endif
71604+
71605 return 0;
71606 }
71607
71608@@ -393,6 +424,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
71609 if (end < start)
71610 goto out;
71611
71612+#ifdef CONFIG_PAX_SEGMEXEC
71613+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
71614+ if (end > SEGMEXEC_TASK_SIZE)
71615+ goto out;
71616+ } else
71617+#endif
71618+
71619+ if (end > TASK_SIZE)
71620+ goto out;
71621+
71622 error = 0;
71623 if (end == start)
71624 goto out;
71625diff --git a/mm/memory-failure.c b/mm/memory-failure.c
71626index 6de0d61..da836cf 100644
71627--- a/mm/memory-failure.c
71628+++ b/mm/memory-failure.c
71629@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
71630
71631 int sysctl_memory_failure_recovery __read_mostly = 1;
71632
71633-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
71634+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
71635
71636 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
71637
71638@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
71639 pfn, t->comm, t->pid);
71640 si.si_signo = SIGBUS;
71641 si.si_errno = 0;
71642- si.si_addr = (void *)addr;
71643+ si.si_addr = (void __user *)addr;
71644 #ifdef __ARCH_SI_TRAPNO
71645 si.si_trapno = trapno;
71646 #endif
71647@@ -1038,7 +1038,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
71648 }
71649
71650 nr_pages = 1 << compound_trans_order(hpage);
71651- atomic_long_add(nr_pages, &mce_bad_pages);
71652+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
71653
71654 /*
71655 * We need/can do nothing about count=0 pages.
71656@@ -1068,7 +1068,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
71657 if (!PageHWPoison(hpage)
71658 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
71659 || (p != hpage && TestSetPageHWPoison(hpage))) {
71660- atomic_long_sub(nr_pages, &mce_bad_pages);
71661+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71662 return 0;
71663 }
71664 set_page_hwpoison_huge_page(hpage);
71665@@ -1126,7 +1126,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
71666 }
71667 if (hwpoison_filter(p)) {
71668 if (TestClearPageHWPoison(p))
71669- atomic_long_sub(nr_pages, &mce_bad_pages);
71670+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71671 unlock_page(hpage);
71672 put_page(hpage);
71673 return 0;
71674@@ -1321,7 +1321,7 @@ int unpoison_memory(unsigned long pfn)
71675 return 0;
71676 }
71677 if (TestClearPageHWPoison(p))
71678- atomic_long_sub(nr_pages, &mce_bad_pages);
71679+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71680 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
71681 return 0;
71682 }
71683@@ -1335,7 +1335,7 @@ int unpoison_memory(unsigned long pfn)
71684 */
71685 if (TestClearPageHWPoison(page)) {
71686 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
71687- atomic_long_sub(nr_pages, &mce_bad_pages);
71688+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71689 freeit = 1;
71690 if (PageHuge(page))
71691 clear_page_hwpoison_huge_page(page);
71692@@ -1448,7 +1448,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
71693 }
71694 done:
71695 if (!PageHWPoison(hpage))
71696- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
71697+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
71698 set_page_hwpoison_huge_page(hpage);
71699 dequeue_hwpoisoned_huge_page(hpage);
71700 /* keep elevated page count for bad page */
71701@@ -1579,7 +1579,7 @@ int soft_offline_page(struct page *page, int flags)
71702 return ret;
71703
71704 done:
71705- atomic_long_add(1, &mce_bad_pages);
71706+ atomic_long_add_unchecked(1, &mce_bad_pages);
71707 SetPageHWPoison(page);
71708 /* keep elevated page count for bad page */
71709 return ret;
71710diff --git a/mm/memory.c b/mm/memory.c
71711index 2466d12..595ed79 100644
71712--- a/mm/memory.c
71713+++ b/mm/memory.c
71714@@ -422,6 +422,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
71715 free_pte_range(tlb, pmd, addr);
71716 } while (pmd++, addr = next, addr != end);
71717
71718+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
71719 start &= PUD_MASK;
71720 if (start < floor)
71721 return;
71722@@ -436,6 +437,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
71723 pmd = pmd_offset(pud, start);
71724 pud_clear(pud);
71725 pmd_free_tlb(tlb, pmd, start);
71726+#endif
71727+
71728 }
71729
71730 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
71731@@ -455,6 +458,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
71732 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
71733 } while (pud++, addr = next, addr != end);
71734
71735+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
71736 start &= PGDIR_MASK;
71737 if (start < floor)
71738 return;
71739@@ -469,6 +473,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
71740 pud = pud_offset(pgd, start);
71741 pgd_clear(pgd);
71742 pud_free_tlb(tlb, pud, start);
71743+#endif
71744+
71745 }
71746
71747 /*
71748@@ -1602,12 +1608,6 @@ no_page_table:
71749 return page;
71750 }
71751
71752-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
71753-{
71754- return stack_guard_page_start(vma, addr) ||
71755- stack_guard_page_end(vma, addr+PAGE_SIZE);
71756-}
71757-
71758 /**
71759 * __get_user_pages() - pin user pages in memory
71760 * @tsk: task_struct of target task
71761@@ -1680,10 +1680,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71762 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
71763 i = 0;
71764
71765- do {
71766+ while (nr_pages) {
71767 struct vm_area_struct *vma;
71768
71769- vma = find_extend_vma(mm, start);
71770+ vma = find_vma(mm, start);
71771 if (!vma && in_gate_area(mm, start)) {
71772 unsigned long pg = start & PAGE_MASK;
71773 pgd_t *pgd;
71774@@ -1731,7 +1731,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71775 goto next_page;
71776 }
71777
71778- if (!vma ||
71779+ if (!vma || start < vma->vm_start ||
71780 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
71781 !(vm_flags & vma->vm_flags))
71782 return i ? : -EFAULT;
71783@@ -1758,11 +1758,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71784 int ret;
71785 unsigned int fault_flags = 0;
71786
71787- /* For mlock, just skip the stack guard page. */
71788- if (foll_flags & FOLL_MLOCK) {
71789- if (stack_guard_page(vma, start))
71790- goto next_page;
71791- }
71792 if (foll_flags & FOLL_WRITE)
71793 fault_flags |= FAULT_FLAG_WRITE;
71794 if (nonblocking)
71795@@ -1836,7 +1831,7 @@ next_page:
71796 start += PAGE_SIZE;
71797 nr_pages--;
71798 } while (nr_pages && start < vma->vm_end);
71799- } while (nr_pages);
71800+ }
71801 return i;
71802 }
71803 EXPORT_SYMBOL(__get_user_pages);
71804@@ -2043,6 +2038,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
71805 page_add_file_rmap(page);
71806 set_pte_at(mm, addr, pte, mk_pte(page, prot));
71807
71808+#ifdef CONFIG_PAX_SEGMEXEC
71809+ pax_mirror_file_pte(vma, addr, page, ptl);
71810+#endif
71811+
71812 retval = 0;
71813 pte_unmap_unlock(pte, ptl);
71814 return retval;
71815@@ -2077,10 +2076,22 @@ out:
71816 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
71817 struct page *page)
71818 {
71819+
71820+#ifdef CONFIG_PAX_SEGMEXEC
71821+ struct vm_area_struct *vma_m;
71822+#endif
71823+
71824 if (addr < vma->vm_start || addr >= vma->vm_end)
71825 return -EFAULT;
71826 if (!page_count(page))
71827 return -EINVAL;
71828+
71829+#ifdef CONFIG_PAX_SEGMEXEC
71830+ vma_m = pax_find_mirror_vma(vma);
71831+ if (vma_m)
71832+ vma_m->vm_flags |= VM_INSERTPAGE;
71833+#endif
71834+
71835 vma->vm_flags |= VM_INSERTPAGE;
71836 return insert_page(vma, addr, page, vma->vm_page_prot);
71837 }
71838@@ -2166,6 +2177,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
71839 unsigned long pfn)
71840 {
71841 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
71842+ BUG_ON(vma->vm_mirror);
71843
71844 if (addr < vma->vm_start || addr >= vma->vm_end)
71845 return -EFAULT;
71846@@ -2373,7 +2385,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
71847
71848 BUG_ON(pud_huge(*pud));
71849
71850- pmd = pmd_alloc(mm, pud, addr);
71851+ pmd = (mm == &init_mm) ?
71852+ pmd_alloc_kernel(mm, pud, addr) :
71853+ pmd_alloc(mm, pud, addr);
71854 if (!pmd)
71855 return -ENOMEM;
71856 do {
71857@@ -2393,7 +2407,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
71858 unsigned long next;
71859 int err;
71860
71861- pud = pud_alloc(mm, pgd, addr);
71862+ pud = (mm == &init_mm) ?
71863+ pud_alloc_kernel(mm, pgd, addr) :
71864+ pud_alloc(mm, pgd, addr);
71865 if (!pud)
71866 return -ENOMEM;
71867 do {
71868@@ -2481,6 +2497,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
71869 copy_user_highpage(dst, src, va, vma);
71870 }
71871
71872+#ifdef CONFIG_PAX_SEGMEXEC
71873+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
71874+{
71875+ struct mm_struct *mm = vma->vm_mm;
71876+ spinlock_t *ptl;
71877+ pte_t *pte, entry;
71878+
71879+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
71880+ entry = *pte;
71881+ if (!pte_present(entry)) {
71882+ if (!pte_none(entry)) {
71883+ BUG_ON(pte_file(entry));
71884+ free_swap_and_cache(pte_to_swp_entry(entry));
71885+ pte_clear_not_present_full(mm, address, pte, 0);
71886+ }
71887+ } else {
71888+ struct page *page;
71889+
71890+ flush_cache_page(vma, address, pte_pfn(entry));
71891+ entry = ptep_clear_flush(vma, address, pte);
71892+ BUG_ON(pte_dirty(entry));
71893+ page = vm_normal_page(vma, address, entry);
71894+ if (page) {
71895+ update_hiwater_rss(mm);
71896+ if (PageAnon(page))
71897+ dec_mm_counter_fast(mm, MM_ANONPAGES);
71898+ else
71899+ dec_mm_counter_fast(mm, MM_FILEPAGES);
71900+ page_remove_rmap(page);
71901+ page_cache_release(page);
71902+ }
71903+ }
71904+ pte_unmap_unlock(pte, ptl);
71905+}
71906+
71907+/* PaX: if vma is mirrored, synchronize the mirror's PTE
71908+ *
71909+ * the ptl of the lower mapped page is held on entry and is not released on exit
71910+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
71911+ */
71912+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
71913+{
71914+ struct mm_struct *mm = vma->vm_mm;
71915+ unsigned long address_m;
71916+ spinlock_t *ptl_m;
71917+ struct vm_area_struct *vma_m;
71918+ pmd_t *pmd_m;
71919+ pte_t *pte_m, entry_m;
71920+
71921+ BUG_ON(!page_m || !PageAnon(page_m));
71922+
71923+ vma_m = pax_find_mirror_vma(vma);
71924+ if (!vma_m)
71925+ return;
71926+
71927+ BUG_ON(!PageLocked(page_m));
71928+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71929+ address_m = address + SEGMEXEC_TASK_SIZE;
71930+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
71931+ pte_m = pte_offset_map(pmd_m, address_m);
71932+ ptl_m = pte_lockptr(mm, pmd_m);
71933+ if (ptl != ptl_m) {
71934+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71935+ if (!pte_none(*pte_m))
71936+ goto out;
71937+ }
71938+
71939+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
71940+ page_cache_get(page_m);
71941+ page_add_anon_rmap(page_m, vma_m, address_m);
71942+ inc_mm_counter_fast(mm, MM_ANONPAGES);
71943+ set_pte_at(mm, address_m, pte_m, entry_m);
71944+ update_mmu_cache(vma_m, address_m, entry_m);
71945+out:
71946+ if (ptl != ptl_m)
71947+ spin_unlock(ptl_m);
71948+ pte_unmap(pte_m);
71949+ unlock_page(page_m);
71950+}
71951+
71952+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
71953+{
71954+ struct mm_struct *mm = vma->vm_mm;
71955+ unsigned long address_m;
71956+ spinlock_t *ptl_m;
71957+ struct vm_area_struct *vma_m;
71958+ pmd_t *pmd_m;
71959+ pte_t *pte_m, entry_m;
71960+
71961+ BUG_ON(!page_m || PageAnon(page_m));
71962+
71963+ vma_m = pax_find_mirror_vma(vma);
71964+ if (!vma_m)
71965+ return;
71966+
71967+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71968+ address_m = address + SEGMEXEC_TASK_SIZE;
71969+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
71970+ pte_m = pte_offset_map(pmd_m, address_m);
71971+ ptl_m = pte_lockptr(mm, pmd_m);
71972+ if (ptl != ptl_m) {
71973+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71974+ if (!pte_none(*pte_m))
71975+ goto out;
71976+ }
71977+
71978+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
71979+ page_cache_get(page_m);
71980+ page_add_file_rmap(page_m);
71981+ inc_mm_counter_fast(mm, MM_FILEPAGES);
71982+ set_pte_at(mm, address_m, pte_m, entry_m);
71983+ update_mmu_cache(vma_m, address_m, entry_m);
71984+out:
71985+ if (ptl != ptl_m)
71986+ spin_unlock(ptl_m);
71987+ pte_unmap(pte_m);
71988+}
71989+
71990+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
71991+{
71992+ struct mm_struct *mm = vma->vm_mm;
71993+ unsigned long address_m;
71994+ spinlock_t *ptl_m;
71995+ struct vm_area_struct *vma_m;
71996+ pmd_t *pmd_m;
71997+ pte_t *pte_m, entry_m;
71998+
71999+ vma_m = pax_find_mirror_vma(vma);
72000+ if (!vma_m)
72001+ return;
72002+
72003+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
72004+ address_m = address + SEGMEXEC_TASK_SIZE;
72005+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
72006+ pte_m = pte_offset_map(pmd_m, address_m);
72007+ ptl_m = pte_lockptr(mm, pmd_m);
72008+ if (ptl != ptl_m) {
72009+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
72010+ if (!pte_none(*pte_m))
72011+ goto out;
72012+ }
72013+
72014+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
72015+ set_pte_at(mm, address_m, pte_m, entry_m);
72016+out:
72017+ if (ptl != ptl_m)
72018+ spin_unlock(ptl_m);
72019+ pte_unmap(pte_m);
72020+}
72021+
72022+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
72023+{
72024+ struct page *page_m;
72025+ pte_t entry;
72026+
72027+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
72028+ goto out;
72029+
72030+ entry = *pte;
72031+ page_m = vm_normal_page(vma, address, entry);
72032+ if (!page_m)
72033+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
72034+ else if (PageAnon(page_m)) {
72035+ if (pax_find_mirror_vma(vma)) {
72036+ pte_unmap_unlock(pte, ptl);
72037+ lock_page(page_m);
72038+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
72039+ if (pte_same(entry, *pte))
72040+ pax_mirror_anon_pte(vma, address, page_m, ptl);
72041+ else
72042+ unlock_page(page_m);
72043+ }
72044+ } else
72045+ pax_mirror_file_pte(vma, address, page_m, ptl);
72046+
72047+out:
72048+ pte_unmap_unlock(pte, ptl);
72049+}
72050+#endif
72051+
72052 /*
72053 * This routine handles present pages, when users try to write
72054 * to a shared page. It is done by copying the page to a new address
72055@@ -2692,6 +2888,12 @@ gotten:
72056 */
72057 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
72058 if (likely(pte_same(*page_table, orig_pte))) {
72059+
72060+#ifdef CONFIG_PAX_SEGMEXEC
72061+ if (pax_find_mirror_vma(vma))
72062+ BUG_ON(!trylock_page(new_page));
72063+#endif
72064+
72065 if (old_page) {
72066 if (!PageAnon(old_page)) {
72067 dec_mm_counter_fast(mm, MM_FILEPAGES);
72068@@ -2743,6 +2945,10 @@ gotten:
72069 page_remove_rmap(old_page);
72070 }
72071
72072+#ifdef CONFIG_PAX_SEGMEXEC
72073+ pax_mirror_anon_pte(vma, address, new_page, ptl);
72074+#endif
72075+
72076 /* Free the old page.. */
72077 new_page = old_page;
72078 ret |= VM_FAULT_WRITE;
72079@@ -3022,6 +3228,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
72080 swap_free(entry);
72081 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
72082 try_to_free_swap(page);
72083+
72084+#ifdef CONFIG_PAX_SEGMEXEC
72085+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
72086+#endif
72087+
72088 unlock_page(page);
72089 if (swapcache) {
72090 /*
72091@@ -3045,6 +3256,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
72092
72093 /* No need to invalidate - it was non-present before */
72094 update_mmu_cache(vma, address, page_table);
72095+
72096+#ifdef CONFIG_PAX_SEGMEXEC
72097+ pax_mirror_anon_pte(vma, address, page, ptl);
72098+#endif
72099+
72100 unlock:
72101 pte_unmap_unlock(page_table, ptl);
72102 out:
72103@@ -3064,40 +3280,6 @@ out_release:
72104 }
72105
72106 /*
72107- * This is like a special single-page "expand_{down|up}wards()",
72108- * except we must first make sure that 'address{-|+}PAGE_SIZE'
72109- * doesn't hit another vma.
72110- */
72111-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
72112-{
72113- address &= PAGE_MASK;
72114- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
72115- struct vm_area_struct *prev = vma->vm_prev;
72116-
72117- /*
72118- * Is there a mapping abutting this one below?
72119- *
72120- * That's only ok if it's the same stack mapping
72121- * that has gotten split..
72122- */
72123- if (prev && prev->vm_end == address)
72124- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
72125-
72126- expand_downwards(vma, address - PAGE_SIZE);
72127- }
72128- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
72129- struct vm_area_struct *next = vma->vm_next;
72130-
72131- /* As VM_GROWSDOWN but s/below/above/ */
72132- if (next && next->vm_start == address + PAGE_SIZE)
72133- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
72134-
72135- expand_upwards(vma, address + PAGE_SIZE);
72136- }
72137- return 0;
72138-}
72139-
72140-/*
72141 * We enter with non-exclusive mmap_sem (to exclude vma changes,
72142 * but allow concurrent faults), and pte mapped but not yet locked.
72143 * We return with mmap_sem still held, but pte unmapped and unlocked.
72144@@ -3106,27 +3288,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
72145 unsigned long address, pte_t *page_table, pmd_t *pmd,
72146 unsigned int flags)
72147 {
72148- struct page *page;
72149+ struct page *page = NULL;
72150 spinlock_t *ptl;
72151 pte_t entry;
72152
72153- pte_unmap(page_table);
72154-
72155- /* Check if we need to add a guard page to the stack */
72156- if (check_stack_guard_page(vma, address) < 0)
72157- return VM_FAULT_SIGBUS;
72158-
72159- /* Use the zero-page for reads */
72160 if (!(flags & FAULT_FLAG_WRITE)) {
72161 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
72162 vma->vm_page_prot));
72163- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
72164+ ptl = pte_lockptr(mm, pmd);
72165+ spin_lock(ptl);
72166 if (!pte_none(*page_table))
72167 goto unlock;
72168 goto setpte;
72169 }
72170
72171 /* Allocate our own private page. */
72172+ pte_unmap(page_table);
72173+
72174 if (unlikely(anon_vma_prepare(vma)))
72175 goto oom;
72176 page = alloc_zeroed_user_highpage_movable(vma, address);
72177@@ -3145,6 +3323,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
72178 if (!pte_none(*page_table))
72179 goto release;
72180
72181+#ifdef CONFIG_PAX_SEGMEXEC
72182+ if (pax_find_mirror_vma(vma))
72183+ BUG_ON(!trylock_page(page));
72184+#endif
72185+
72186 inc_mm_counter_fast(mm, MM_ANONPAGES);
72187 page_add_new_anon_rmap(page, vma, address);
72188 setpte:
72189@@ -3152,6 +3335,12 @@ setpte:
72190
72191 /* No need to invalidate - it was non-present before */
72192 update_mmu_cache(vma, address, page_table);
72193+
72194+#ifdef CONFIG_PAX_SEGMEXEC
72195+ if (page)
72196+ pax_mirror_anon_pte(vma, address, page, ptl);
72197+#endif
72198+
72199 unlock:
72200 pte_unmap_unlock(page_table, ptl);
72201 return 0;
72202@@ -3295,6 +3484,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
72203 */
72204 /* Only go through if we didn't race with anybody else... */
72205 if (likely(pte_same(*page_table, orig_pte))) {
72206+
72207+#ifdef CONFIG_PAX_SEGMEXEC
72208+ if (anon && pax_find_mirror_vma(vma))
72209+ BUG_ON(!trylock_page(page));
72210+#endif
72211+
72212 flush_icache_page(vma, page);
72213 entry = mk_pte(page, vma->vm_page_prot);
72214 if (flags & FAULT_FLAG_WRITE)
72215@@ -3314,6 +3509,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
72216
72217 /* no need to invalidate: a not-present page won't be cached */
72218 update_mmu_cache(vma, address, page_table);
72219+
72220+#ifdef CONFIG_PAX_SEGMEXEC
72221+ if (anon)
72222+ pax_mirror_anon_pte(vma, address, page, ptl);
72223+ else
72224+ pax_mirror_file_pte(vma, address, page, ptl);
72225+#endif
72226+
72227 } else {
72228 if (cow_page)
72229 mem_cgroup_uncharge_page(cow_page);
72230@@ -3467,6 +3670,12 @@ int handle_pte_fault(struct mm_struct *mm,
72231 if (flags & FAULT_FLAG_WRITE)
72232 flush_tlb_fix_spurious_fault(vma, address);
72233 }
72234+
72235+#ifdef CONFIG_PAX_SEGMEXEC
72236+ pax_mirror_pte(vma, address, pte, pmd, ptl);
72237+ return 0;
72238+#endif
72239+
72240 unlock:
72241 pte_unmap_unlock(pte, ptl);
72242 return 0;
72243@@ -3483,6 +3692,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
72244 pmd_t *pmd;
72245 pte_t *pte;
72246
72247+#ifdef CONFIG_PAX_SEGMEXEC
72248+ struct vm_area_struct *vma_m;
72249+#endif
72250+
72251 __set_current_state(TASK_RUNNING);
72252
72253 count_vm_event(PGFAULT);
72254@@ -3494,6 +3707,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
72255 if (unlikely(is_vm_hugetlb_page(vma)))
72256 return hugetlb_fault(mm, vma, address, flags);
72257
72258+#ifdef CONFIG_PAX_SEGMEXEC
72259+ vma_m = pax_find_mirror_vma(vma);
72260+ if (vma_m) {
72261+ unsigned long address_m;
72262+ pgd_t *pgd_m;
72263+ pud_t *pud_m;
72264+ pmd_t *pmd_m;
72265+
72266+ if (vma->vm_start > vma_m->vm_start) {
72267+ address_m = address;
72268+ address -= SEGMEXEC_TASK_SIZE;
72269+ vma = vma_m;
72270+ } else
72271+ address_m = address + SEGMEXEC_TASK_SIZE;
72272+
72273+ pgd_m = pgd_offset(mm, address_m);
72274+ pud_m = pud_alloc(mm, pgd_m, address_m);
72275+ if (!pud_m)
72276+ return VM_FAULT_OOM;
72277+ pmd_m = pmd_alloc(mm, pud_m, address_m);
72278+ if (!pmd_m)
72279+ return VM_FAULT_OOM;
72280+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
72281+ return VM_FAULT_OOM;
72282+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
72283+ }
72284+#endif
72285+
72286 retry:
72287 pgd = pgd_offset(mm, address);
72288 pud = pud_alloc(mm, pgd, address);
72289@@ -3535,7 +3776,7 @@ retry:
72290 * run pte_offset_map on the pmd, if an huge pmd could
72291 * materialize from under us from a different thread.
72292 */
72293- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
72294+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
72295 return VM_FAULT_OOM;
72296 /* if an huge pmd materialized from under us just retry later */
72297 if (unlikely(pmd_trans_huge(*pmd)))
72298@@ -3572,6 +3813,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
72299 spin_unlock(&mm->page_table_lock);
72300 return 0;
72301 }
72302+
72303+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
72304+{
72305+ pud_t *new = pud_alloc_one(mm, address);
72306+ if (!new)
72307+ return -ENOMEM;
72308+
72309+ smp_wmb(); /* See comment in __pte_alloc */
72310+
72311+ spin_lock(&mm->page_table_lock);
72312+ if (pgd_present(*pgd)) /* Another has populated it */
72313+ pud_free(mm, new);
72314+ else
72315+ pgd_populate_kernel(mm, pgd, new);
72316+ spin_unlock(&mm->page_table_lock);
72317+ return 0;
72318+}
72319 #endif /* __PAGETABLE_PUD_FOLDED */
72320
72321 #ifndef __PAGETABLE_PMD_FOLDED
72322@@ -3602,6 +3860,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
72323 spin_unlock(&mm->page_table_lock);
72324 return 0;
72325 }
72326+
72327+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
72328+{
72329+ pmd_t *new = pmd_alloc_one(mm, address);
72330+ if (!new)
72331+ return -ENOMEM;
72332+
72333+ smp_wmb(); /* See comment in __pte_alloc */
72334+
72335+ spin_lock(&mm->page_table_lock);
72336+#ifndef __ARCH_HAS_4LEVEL_HACK
72337+ if (pud_present(*pud)) /* Another has populated it */
72338+ pmd_free(mm, new);
72339+ else
72340+ pud_populate_kernel(mm, pud, new);
72341+#else
72342+ if (pgd_present(*pud)) /* Another has populated it */
72343+ pmd_free(mm, new);
72344+ else
72345+ pgd_populate_kernel(mm, pud, new);
72346+#endif /* __ARCH_HAS_4LEVEL_HACK */
72347+ spin_unlock(&mm->page_table_lock);
72348+ return 0;
72349+}
72350 #endif /* __PAGETABLE_PMD_FOLDED */
72351
72352 int make_pages_present(unsigned long addr, unsigned long end)
72353@@ -3639,7 +3921,7 @@ static int __init gate_vma_init(void)
72354 gate_vma.vm_start = FIXADDR_USER_START;
72355 gate_vma.vm_end = FIXADDR_USER_END;
72356 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
72357- gate_vma.vm_page_prot = __P101;
72358+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
72359
72360 return 0;
72361 }
72362diff --git a/mm/mempolicy.c b/mm/mempolicy.c
72363index b12b28a..64b57d0 100644
72364--- a/mm/mempolicy.c
72365+++ b/mm/mempolicy.c
72366@@ -619,6 +619,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
72367 unsigned long vmstart;
72368 unsigned long vmend;
72369
72370+#ifdef CONFIG_PAX_SEGMEXEC
72371+ struct vm_area_struct *vma_m;
72372+#endif
72373+
72374 vma = find_vma(mm, start);
72375 if (!vma || vma->vm_start > start)
72376 return -EFAULT;
72377@@ -672,6 +676,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
72378 if (err)
72379 goto out;
72380 }
72381+
72382+#ifdef CONFIG_PAX_SEGMEXEC
72383+ vma_m = pax_find_mirror_vma(vma);
72384+ if (vma_m && vma_m->vm_ops && vma_m->vm_ops->set_policy) {
72385+ err = vma_m->vm_ops->set_policy(vma_m, new_pol);
72386+ if (err)
72387+ goto out;
72388+ }
72389+#endif
72390+
72391 }
72392
72393 out:
72394@@ -1125,6 +1139,17 @@ static long do_mbind(unsigned long start, unsigned long len,
72395
72396 if (end < start)
72397 return -EINVAL;
72398+
72399+#ifdef CONFIG_PAX_SEGMEXEC
72400+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72401+ if (end > SEGMEXEC_TASK_SIZE)
72402+ return -EINVAL;
72403+ } else
72404+#endif
72405+
72406+ if (end > TASK_SIZE)
72407+ return -EINVAL;
72408+
72409 if (end == start)
72410 return 0;
72411
72412@@ -1348,8 +1373,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
72413 */
72414 tcred = __task_cred(task);
72415 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
72416- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
72417- !capable(CAP_SYS_NICE)) {
72418+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
72419 rcu_read_unlock();
72420 err = -EPERM;
72421 goto out_put;
72422@@ -1380,6 +1404,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
72423 goto out;
72424 }
72425
72426+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72427+ if (mm != current->mm &&
72428+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
72429+ mmput(mm);
72430+ err = -EPERM;
72431+ goto out;
72432+ }
72433+#endif
72434+
72435 err = do_migrate_pages(mm, old, new,
72436 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
72437
72438diff --git a/mm/mlock.c b/mm/mlock.c
72439index ef726e8..cd7f1ec 100644
72440--- a/mm/mlock.c
72441+++ b/mm/mlock.c
72442@@ -13,6 +13,7 @@
72443 #include <linux/pagemap.h>
72444 #include <linux/mempolicy.h>
72445 #include <linux/syscalls.h>
72446+#include <linux/security.h>
72447 #include <linux/sched.h>
72448 #include <linux/export.h>
72449 #include <linux/rmap.h>
72450@@ -376,7 +377,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
72451 {
72452 unsigned long nstart, end, tmp;
72453 struct vm_area_struct * vma, * prev;
72454- int error;
72455+ int error = 0;
72456
72457 VM_BUG_ON(start & ~PAGE_MASK);
72458 VM_BUG_ON(len != PAGE_ALIGN(len));
72459@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
72460 return -EINVAL;
72461 if (end == start)
72462 return 0;
72463+ if (end > TASK_SIZE)
72464+ return -EINVAL;
72465+
72466 vma = find_vma(current->mm, start);
72467 if (!vma || vma->vm_start > start)
72468 return -ENOMEM;
72469@@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
72470 for (nstart = start ; ; ) {
72471 vm_flags_t newflags;
72472
72473+#ifdef CONFIG_PAX_SEGMEXEC
72474+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
72475+ break;
72476+#endif
72477+
72478 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
72479
72480 newflags = vma->vm_flags | VM_LOCKED;
72481@@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
72482 lock_limit >>= PAGE_SHIFT;
72483
72484 /* check against resource limits */
72485+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
72486 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
72487 error = do_mlock(start, len, 1);
72488 up_write(&current->mm->mmap_sem);
72489@@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
72490 static int do_mlockall(int flags)
72491 {
72492 struct vm_area_struct * vma, * prev = NULL;
72493- unsigned int def_flags = 0;
72494
72495 if (flags & MCL_FUTURE)
72496- def_flags = VM_LOCKED;
72497- current->mm->def_flags = def_flags;
72498+ current->mm->def_flags |= VM_LOCKED;
72499+ else
72500+ current->mm->def_flags &= ~VM_LOCKED;
72501 if (flags == MCL_FUTURE)
72502 goto out;
72503
72504 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
72505 vm_flags_t newflags;
72506
72507+#ifdef CONFIG_PAX_SEGMEXEC
72508+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
72509+ break;
72510+#endif
72511+
72512+ BUG_ON(vma->vm_end > TASK_SIZE);
72513 newflags = vma->vm_flags | VM_LOCKED;
72514 if (!(flags & MCL_CURRENT))
72515 newflags &= ~VM_LOCKED;
72516@@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
72517 lock_limit >>= PAGE_SHIFT;
72518
72519 ret = -ENOMEM;
72520+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
72521 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
72522 capable(CAP_IPC_LOCK))
72523 ret = do_mlockall(flags);
72524diff --git a/mm/mmap.c b/mm/mmap.c
72525index fa1f274..86de476 100644
72526--- a/mm/mmap.c
72527+++ b/mm/mmap.c
72528@@ -47,6 +47,16 @@
72529 #define arch_rebalance_pgtables(addr, len) (addr)
72530 #endif
72531
72532+static inline void verify_mm_writelocked(struct mm_struct *mm)
72533+{
72534+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
72535+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72536+ up_read(&mm->mmap_sem);
72537+ BUG();
72538+ }
72539+#endif
72540+}
72541+
72542 static void unmap_region(struct mm_struct *mm,
72543 struct vm_area_struct *vma, struct vm_area_struct *prev,
72544 unsigned long start, unsigned long end);
72545@@ -72,22 +82,32 @@ static void unmap_region(struct mm_struct *mm,
72546 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
72547 *
72548 */
72549-pgprot_t protection_map[16] = {
72550+pgprot_t protection_map[16] __read_only = {
72551 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
72552 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
72553 };
72554
72555-pgprot_t vm_get_page_prot(unsigned long vm_flags)
72556+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
72557 {
72558- return __pgprot(pgprot_val(protection_map[vm_flags &
72559+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
72560 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
72561 pgprot_val(arch_vm_get_page_prot(vm_flags)));
72562+
72563+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72564+ if (!(__supported_pte_mask & _PAGE_NX) &&
72565+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
72566+ (vm_flags & (VM_READ | VM_WRITE)))
72567+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
72568+#endif
72569+
72570+ return prot;
72571 }
72572 EXPORT_SYMBOL(vm_get_page_prot);
72573
72574 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
72575 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
72576 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
72577+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
72578 /*
72579 * Make sure vm_committed_as in one cacheline and not cacheline shared with
72580 * other variables. It can be updated by several CPUs frequently.
72581@@ -229,6 +249,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
72582 struct vm_area_struct *next = vma->vm_next;
72583
72584 might_sleep();
72585+ BUG_ON(vma->vm_mirror);
72586 if (vma->vm_ops && vma->vm_ops->close)
72587 vma->vm_ops->close(vma);
72588 if (vma->vm_file) {
72589@@ -275,6 +296,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
72590 * not page aligned -Ram Gupta
72591 */
72592 rlim = rlimit(RLIMIT_DATA);
72593+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
72594 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
72595 (mm->end_data - mm->start_data) > rlim)
72596 goto out;
72597@@ -708,6 +730,12 @@ static int
72598 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
72599 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
72600 {
72601+
72602+#ifdef CONFIG_PAX_SEGMEXEC
72603+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
72604+ return 0;
72605+#endif
72606+
72607 if (is_mergeable_vma(vma, file, vm_flags) &&
72608 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
72609 if (vma->vm_pgoff == vm_pgoff)
72610@@ -727,6 +755,12 @@ static int
72611 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
72612 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
72613 {
72614+
72615+#ifdef CONFIG_PAX_SEGMEXEC
72616+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
72617+ return 0;
72618+#endif
72619+
72620 if (is_mergeable_vma(vma, file, vm_flags) &&
72621 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
72622 pgoff_t vm_pglen;
72623@@ -769,13 +803,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
72624 struct vm_area_struct *vma_merge(struct mm_struct *mm,
72625 struct vm_area_struct *prev, unsigned long addr,
72626 unsigned long end, unsigned long vm_flags,
72627- struct anon_vma *anon_vma, struct file *file,
72628+ struct anon_vma *anon_vma, struct file *file,
72629 pgoff_t pgoff, struct mempolicy *policy)
72630 {
72631 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
72632 struct vm_area_struct *area, *next;
72633 int err;
72634
72635+#ifdef CONFIG_PAX_SEGMEXEC
72636+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
72637+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
72638+
72639+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
72640+#endif
72641+
72642 /*
72643 * We later require that vma->vm_flags == vm_flags,
72644 * so this tests vma->vm_flags & VM_SPECIAL, too.
72645@@ -791,6 +832,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72646 if (next && next->vm_end == end) /* cases 6, 7, 8 */
72647 next = next->vm_next;
72648
72649+#ifdef CONFIG_PAX_SEGMEXEC
72650+ if (prev)
72651+ prev_m = pax_find_mirror_vma(prev);
72652+ if (area)
72653+ area_m = pax_find_mirror_vma(area);
72654+ if (next)
72655+ next_m = pax_find_mirror_vma(next);
72656+#endif
72657+
72658 /*
72659 * Can it merge with the predecessor?
72660 */
72661@@ -810,9 +860,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72662 /* cases 1, 6 */
72663 err = vma_adjust(prev, prev->vm_start,
72664 next->vm_end, prev->vm_pgoff, NULL);
72665- } else /* cases 2, 5, 7 */
72666+
72667+#ifdef CONFIG_PAX_SEGMEXEC
72668+ if (!err && prev_m)
72669+ err = vma_adjust(prev_m, prev_m->vm_start,
72670+ next_m->vm_end, prev_m->vm_pgoff, NULL);
72671+#endif
72672+
72673+ } else { /* cases 2, 5, 7 */
72674 err = vma_adjust(prev, prev->vm_start,
72675 end, prev->vm_pgoff, NULL);
72676+
72677+#ifdef CONFIG_PAX_SEGMEXEC
72678+ if (!err && prev_m)
72679+ err = vma_adjust(prev_m, prev_m->vm_start,
72680+ end_m, prev_m->vm_pgoff, NULL);
72681+#endif
72682+
72683+ }
72684 if (err)
72685 return NULL;
72686 khugepaged_enter_vma_merge(prev);
72687@@ -826,12 +891,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72688 mpol_equal(policy, vma_policy(next)) &&
72689 can_vma_merge_before(next, vm_flags,
72690 anon_vma, file, pgoff+pglen)) {
72691- if (prev && addr < prev->vm_end) /* case 4 */
72692+ if (prev && addr < prev->vm_end) { /* case 4 */
72693 err = vma_adjust(prev, prev->vm_start,
72694 addr, prev->vm_pgoff, NULL);
72695- else /* cases 3, 8 */
72696+
72697+#ifdef CONFIG_PAX_SEGMEXEC
72698+ if (!err && prev_m)
72699+ err = vma_adjust(prev_m, prev_m->vm_start,
72700+ addr_m, prev_m->vm_pgoff, NULL);
72701+#endif
72702+
72703+ } else { /* cases 3, 8 */
72704 err = vma_adjust(area, addr, next->vm_end,
72705 next->vm_pgoff - pglen, NULL);
72706+
72707+#ifdef CONFIG_PAX_SEGMEXEC
72708+ if (!err && area_m)
72709+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
72710+ next_m->vm_pgoff - pglen, NULL);
72711+#endif
72712+
72713+ }
72714 if (err)
72715 return NULL;
72716 khugepaged_enter_vma_merge(area);
72717@@ -940,14 +1020,11 @@ none:
72718 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
72719 struct file *file, long pages)
72720 {
72721- const unsigned long stack_flags
72722- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
72723-
72724 if (file) {
72725 mm->shared_vm += pages;
72726 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
72727 mm->exec_vm += pages;
72728- } else if (flags & stack_flags)
72729+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
72730 mm->stack_vm += pages;
72731 if (flags & (VM_RESERVED|VM_IO))
72732 mm->reserved_vm += pages;
72733@@ -985,7 +1062,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72734 * (the exception is when the underlying filesystem is noexec
72735 * mounted, in which case we dont add PROT_EXEC.)
72736 */
72737- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72738+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72739 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
72740 prot |= PROT_EXEC;
72741
72742@@ -1011,7 +1088,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72743 /* Obtain the address to map to. we verify (or select) it and ensure
72744 * that it represents a valid section of the address space.
72745 */
72746- addr = get_unmapped_area(file, addr, len, pgoff, flags);
72747+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
72748 if (addr & ~PAGE_MASK)
72749 return addr;
72750
72751@@ -1022,6 +1099,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72752 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
72753 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
72754
72755+#ifdef CONFIG_PAX_MPROTECT
72756+ if (mm->pax_flags & MF_PAX_MPROTECT) {
72757+#ifndef CONFIG_PAX_MPROTECT_COMPAT
72758+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
72759+ gr_log_rwxmmap(file);
72760+
72761+#ifdef CONFIG_PAX_EMUPLT
72762+ vm_flags &= ~VM_EXEC;
72763+#else
72764+ return -EPERM;
72765+#endif
72766+
72767+ }
72768+
72769+ if (!(vm_flags & VM_EXEC))
72770+ vm_flags &= ~VM_MAYEXEC;
72771+#else
72772+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72773+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72774+#endif
72775+ else
72776+ vm_flags &= ~VM_MAYWRITE;
72777+ }
72778+#endif
72779+
72780+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72781+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
72782+ vm_flags &= ~VM_PAGEEXEC;
72783+#endif
72784+
72785 if (flags & MAP_LOCKED)
72786 if (!can_do_mlock())
72787 return -EPERM;
72788@@ -1033,6 +1140,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72789 locked += mm->locked_vm;
72790 lock_limit = rlimit(RLIMIT_MEMLOCK);
72791 lock_limit >>= PAGE_SHIFT;
72792+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72793 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
72794 return -EAGAIN;
72795 }
72796@@ -1099,6 +1207,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72797 }
72798 }
72799
72800+ if (!gr_acl_handle_mmap(file, prot))
72801+ return -EACCES;
72802+
72803 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
72804 }
72805
72806@@ -1175,7 +1286,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
72807 vm_flags_t vm_flags = vma->vm_flags;
72808
72809 /* If it was private or non-writable, the write bit is already clear */
72810- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
72811+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
72812 return 0;
72813
72814 /* The backer wishes to know when pages are first written to? */
72815@@ -1224,14 +1335,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
72816 unsigned long charged = 0;
72817 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
72818
72819+#ifdef CONFIG_PAX_SEGMEXEC
72820+ struct vm_area_struct *vma_m = NULL;
72821+#endif
72822+
72823+ /*
72824+ * mm->mmap_sem is required to protect against another thread
72825+ * changing the mappings in case we sleep.
72826+ */
72827+ verify_mm_writelocked(mm);
72828+
72829 /* Clear old maps */
72830 error = -ENOMEM;
72831-munmap_back:
72832 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72833 if (vma && vma->vm_start < addr + len) {
72834 if (do_munmap(mm, addr, len))
72835 return -ENOMEM;
72836- goto munmap_back;
72837+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72838+ BUG_ON(vma && vma->vm_start < addr + len);
72839 }
72840
72841 /* Check against address space limit. */
72842@@ -1280,6 +1401,16 @@ munmap_back:
72843 goto unacct_error;
72844 }
72845
72846+#ifdef CONFIG_PAX_SEGMEXEC
72847+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
72848+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72849+ if (!vma_m) {
72850+ error = -ENOMEM;
72851+ goto free_vma;
72852+ }
72853+ }
72854+#endif
72855+
72856 vma->vm_mm = mm;
72857 vma->vm_start = addr;
72858 vma->vm_end = addr + len;
72859@@ -1304,6 +1435,19 @@ munmap_back:
72860 error = file->f_op->mmap(file, vma);
72861 if (error)
72862 goto unmap_and_free_vma;
72863+
72864+#ifdef CONFIG_PAX_SEGMEXEC
72865+ if (vma_m && (vm_flags & VM_EXECUTABLE))
72866+ added_exe_file_vma(mm);
72867+#endif
72868+
72869+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72870+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
72871+ vma->vm_flags |= VM_PAGEEXEC;
72872+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72873+ }
72874+#endif
72875+
72876 if (vm_flags & VM_EXECUTABLE)
72877 added_exe_file_vma(mm);
72878
72879@@ -1341,6 +1485,11 @@ munmap_back:
72880 vma_link(mm, vma, prev, rb_link, rb_parent);
72881 file = vma->vm_file;
72882
72883+#ifdef CONFIG_PAX_SEGMEXEC
72884+ if (vma_m)
72885+ BUG_ON(pax_mirror_vma(vma_m, vma));
72886+#endif
72887+
72888 /* Once vma denies write, undo our temporary denial count */
72889 if (correct_wcount)
72890 atomic_inc(&inode->i_writecount);
72891@@ -1349,6 +1498,7 @@ out:
72892
72893 mm->total_vm += len >> PAGE_SHIFT;
72894 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
72895+ track_exec_limit(mm, addr, addr + len, vm_flags);
72896 if (vm_flags & VM_LOCKED) {
72897 if (!mlock_vma_pages_range(vma, addr, addr + len))
72898 mm->locked_vm += (len >> PAGE_SHIFT);
72899@@ -1370,6 +1520,12 @@ unmap_and_free_vma:
72900 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
72901 charged = 0;
72902 free_vma:
72903+
72904+#ifdef CONFIG_PAX_SEGMEXEC
72905+ if (vma_m)
72906+ kmem_cache_free(vm_area_cachep, vma_m);
72907+#endif
72908+
72909 kmem_cache_free(vm_area_cachep, vma);
72910 unacct_error:
72911 if (charged)
72912@@ -1377,6 +1533,44 @@ unacct_error:
72913 return error;
72914 }
72915
72916+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
72917+{
72918+ if (!vma) {
72919+#ifdef CONFIG_STACK_GROWSUP
72920+ if (addr > sysctl_heap_stack_gap)
72921+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
72922+ else
72923+ vma = find_vma(current->mm, 0);
72924+ if (vma && (vma->vm_flags & VM_GROWSUP))
72925+ return false;
72926+#endif
72927+ return true;
72928+ }
72929+
72930+ if (addr + len > vma->vm_start)
72931+ return false;
72932+
72933+ if (vma->vm_flags & VM_GROWSDOWN)
72934+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
72935+#ifdef CONFIG_STACK_GROWSUP
72936+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
72937+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
72938+#endif
72939+
72940+ return true;
72941+}
72942+
72943+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
72944+{
72945+ if (vma->vm_start < len)
72946+ return -ENOMEM;
72947+ if (!(vma->vm_flags & VM_GROWSDOWN))
72948+ return vma->vm_start - len;
72949+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
72950+ return vma->vm_start - len - sysctl_heap_stack_gap;
72951+ return -ENOMEM;
72952+}
72953+
72954 /* Get an address range which is currently unmapped.
72955 * For shmat() with addr=0.
72956 *
72957@@ -1403,18 +1597,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
72958 if (flags & MAP_FIXED)
72959 return addr;
72960
72961+#ifdef CONFIG_PAX_RANDMMAP
72962+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
72963+#endif
72964+
72965 if (addr) {
72966 addr = PAGE_ALIGN(addr);
72967- vma = find_vma(mm, addr);
72968- if (TASK_SIZE - len >= addr &&
72969- (!vma || addr + len <= vma->vm_start))
72970- return addr;
72971+ if (TASK_SIZE - len >= addr) {
72972+ vma = find_vma(mm, addr);
72973+ if (check_heap_stack_gap(vma, addr, len))
72974+ return addr;
72975+ }
72976 }
72977 if (len > mm->cached_hole_size) {
72978- start_addr = addr = mm->free_area_cache;
72979+ start_addr = addr = mm->free_area_cache;
72980 } else {
72981- start_addr = addr = TASK_UNMAPPED_BASE;
72982- mm->cached_hole_size = 0;
72983+ start_addr = addr = mm->mmap_base;
72984+ mm->cached_hole_size = 0;
72985 }
72986
72987 full_search:
72988@@ -1425,34 +1624,40 @@ full_search:
72989 * Start a new search - just in case we missed
72990 * some holes.
72991 */
72992- if (start_addr != TASK_UNMAPPED_BASE) {
72993- addr = TASK_UNMAPPED_BASE;
72994- start_addr = addr;
72995+ if (start_addr != mm->mmap_base) {
72996+ start_addr = addr = mm->mmap_base;
72997 mm->cached_hole_size = 0;
72998 goto full_search;
72999 }
73000 return -ENOMEM;
73001 }
73002- if (!vma || addr + len <= vma->vm_start) {
73003- /*
73004- * Remember the place where we stopped the search:
73005- */
73006- mm->free_area_cache = addr + len;
73007- return addr;
73008- }
73009+ if (check_heap_stack_gap(vma, addr, len))
73010+ break;
73011 if (addr + mm->cached_hole_size < vma->vm_start)
73012 mm->cached_hole_size = vma->vm_start - addr;
73013 addr = vma->vm_end;
73014 }
73015+
73016+ /*
73017+ * Remember the place where we stopped the search:
73018+ */
73019+ mm->free_area_cache = addr + len;
73020+ return addr;
73021 }
73022 #endif
73023
73024 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
73025 {
73026+
73027+#ifdef CONFIG_PAX_SEGMEXEC
73028+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
73029+ return;
73030+#endif
73031+
73032 /*
73033 * Is this a new hole at the lowest possible address?
73034 */
73035- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
73036+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
73037 mm->free_area_cache = addr;
73038 }
73039
73040@@ -1468,7 +1673,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
73041 {
73042 struct vm_area_struct *vma;
73043 struct mm_struct *mm = current->mm;
73044- unsigned long addr = addr0, start_addr;
73045+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
73046
73047 /* requested length too big for entire address space */
73048 if (len > TASK_SIZE)
73049@@ -1477,13 +1682,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
73050 if (flags & MAP_FIXED)
73051 return addr;
73052
73053+#ifdef CONFIG_PAX_RANDMMAP
73054+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
73055+#endif
73056+
73057 /* requesting a specific address */
73058 if (addr) {
73059 addr = PAGE_ALIGN(addr);
73060- vma = find_vma(mm, addr);
73061- if (TASK_SIZE - len >= addr &&
73062- (!vma || addr + len <= vma->vm_start))
73063- return addr;
73064+ if (TASK_SIZE - len >= addr) {
73065+ vma = find_vma(mm, addr);
73066+ if (check_heap_stack_gap(vma, addr, len))
73067+ return addr;
73068+ }
73069 }
73070
73071 /* check if free_area_cache is useful for us */
73072@@ -1507,7 +1717,7 @@ try_again:
73073 * return with success:
73074 */
73075 vma = find_vma(mm, addr);
73076- if (!vma || addr+len <= vma->vm_start)
73077+ if (check_heap_stack_gap(vma, addr, len))
73078 /* remember the address as a hint for next time */
73079 return (mm->free_area_cache = addr);
73080
73081@@ -1516,8 +1726,8 @@ try_again:
73082 mm->cached_hole_size = vma->vm_start - addr;
73083
73084 /* try just below the current vma->vm_start */
73085- addr = vma->vm_start-len;
73086- } while (len < vma->vm_start);
73087+ addr = skip_heap_stack_gap(vma, len);
73088+ } while (!IS_ERR_VALUE(addr));
73089
73090 fail:
73091 /*
73092@@ -1540,13 +1750,21 @@ fail:
73093 * can happen with large stack limits and large mmap()
73094 * allocations.
73095 */
73096+ mm->mmap_base = TASK_UNMAPPED_BASE;
73097+
73098+#ifdef CONFIG_PAX_RANDMMAP
73099+ if (mm->pax_flags & MF_PAX_RANDMMAP)
73100+ mm->mmap_base += mm->delta_mmap;
73101+#endif
73102+
73103+ mm->free_area_cache = mm->mmap_base;
73104 mm->cached_hole_size = ~0UL;
73105- mm->free_area_cache = TASK_UNMAPPED_BASE;
73106 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
73107 /*
73108 * Restore the topdown base:
73109 */
73110- mm->free_area_cache = mm->mmap_base;
73111+ mm->mmap_base = base;
73112+ mm->free_area_cache = base;
73113 mm->cached_hole_size = ~0UL;
73114
73115 return addr;
73116@@ -1555,6 +1773,12 @@ fail:
73117
73118 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
73119 {
73120+
73121+#ifdef CONFIG_PAX_SEGMEXEC
73122+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
73123+ return;
73124+#endif
73125+
73126 /*
73127 * Is this a new hole at the highest possible address?
73128 */
73129@@ -1562,8 +1786,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
73130 mm->free_area_cache = addr;
73131
73132 /* dont allow allocations above current base */
73133- if (mm->free_area_cache > mm->mmap_base)
73134+ if (mm->free_area_cache > mm->mmap_base) {
73135 mm->free_area_cache = mm->mmap_base;
73136+ mm->cached_hole_size = ~0UL;
73137+ }
73138 }
73139
73140 unsigned long
73141@@ -1662,6 +1888,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
73142 return vma;
73143 }
73144
73145+#ifdef CONFIG_PAX_SEGMEXEC
73146+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
73147+{
73148+ struct vm_area_struct *vma_m;
73149+
73150+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
73151+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
73152+ BUG_ON(vma->vm_mirror);
73153+ return NULL;
73154+ }
73155+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
73156+ vma_m = vma->vm_mirror;
73157+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
73158+ BUG_ON(vma->vm_file != vma_m->vm_file);
73159+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
73160+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
73161+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
73162+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
73163+ return vma_m;
73164+}
73165+#endif
73166+
73167 /*
73168 * Verify that the stack growth is acceptable and
73169 * update accounting. This is shared with both the
73170@@ -1678,6 +1926,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
73171 return -ENOMEM;
73172
73173 /* Stack limit test */
73174+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
73175 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
73176 return -ENOMEM;
73177
73178@@ -1688,6 +1937,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
73179 locked = mm->locked_vm + grow;
73180 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
73181 limit >>= PAGE_SHIFT;
73182+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
73183 if (locked > limit && !capable(CAP_IPC_LOCK))
73184 return -ENOMEM;
73185 }
73186@@ -1718,37 +1968,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
73187 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
73188 * vma is the last one with address > vma->vm_end. Have to extend vma.
73189 */
73190+#ifndef CONFIG_IA64
73191+static
73192+#endif
73193 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
73194 {
73195 int error;
73196+ bool locknext;
73197
73198 if (!(vma->vm_flags & VM_GROWSUP))
73199 return -EFAULT;
73200
73201+ /* Also guard against wrapping around to address 0. */
73202+ if (address < PAGE_ALIGN(address+1))
73203+ address = PAGE_ALIGN(address+1);
73204+ else
73205+ return -ENOMEM;
73206+
73207 /*
73208 * We must make sure the anon_vma is allocated
73209 * so that the anon_vma locking is not a noop.
73210 */
73211 if (unlikely(anon_vma_prepare(vma)))
73212 return -ENOMEM;
73213+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
73214+ if (locknext && anon_vma_prepare(vma->vm_next))
73215+ return -ENOMEM;
73216 vma_lock_anon_vma(vma);
73217+ if (locknext)
73218+ vma_lock_anon_vma(vma->vm_next);
73219
73220 /*
73221 * vma->vm_start/vm_end cannot change under us because the caller
73222 * is required to hold the mmap_sem in read mode. We need the
73223- * anon_vma lock to serialize against concurrent expand_stacks.
73224- * Also guard against wrapping around to address 0.
73225+ * anon_vma locks to serialize against concurrent expand_stacks
73226+ * and expand_upwards.
73227 */
73228- if (address < PAGE_ALIGN(address+4))
73229- address = PAGE_ALIGN(address+4);
73230- else {
73231- vma_unlock_anon_vma(vma);
73232- return -ENOMEM;
73233- }
73234 error = 0;
73235
73236 /* Somebody else might have raced and expanded it already */
73237- if (address > vma->vm_end) {
73238+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
73239+ error = -ENOMEM;
73240+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
73241 unsigned long size, grow;
73242
73243 size = address - vma->vm_start;
73244@@ -1763,6 +2024,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
73245 }
73246 }
73247 }
73248+ if (locknext)
73249+ vma_unlock_anon_vma(vma->vm_next);
73250 vma_unlock_anon_vma(vma);
73251 khugepaged_enter_vma_merge(vma);
73252 return error;
73253@@ -1776,6 +2039,8 @@ int expand_downwards(struct vm_area_struct *vma,
73254 unsigned long address)
73255 {
73256 int error;
73257+ bool lockprev = false;
73258+ struct vm_area_struct *prev;
73259
73260 /*
73261 * We must make sure the anon_vma is allocated
73262@@ -1789,6 +2054,15 @@ int expand_downwards(struct vm_area_struct *vma,
73263 if (error)
73264 return error;
73265
73266+ prev = vma->vm_prev;
73267+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
73268+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
73269+#endif
73270+ if (lockprev && anon_vma_prepare(prev))
73271+ return -ENOMEM;
73272+ if (lockprev)
73273+ vma_lock_anon_vma(prev);
73274+
73275 vma_lock_anon_vma(vma);
73276
73277 /*
73278@@ -1798,9 +2072,17 @@ int expand_downwards(struct vm_area_struct *vma,
73279 */
73280
73281 /* Somebody else might have raced and expanded it already */
73282- if (address < vma->vm_start) {
73283+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
73284+ error = -ENOMEM;
73285+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
73286 unsigned long size, grow;
73287
73288+#ifdef CONFIG_PAX_SEGMEXEC
73289+ struct vm_area_struct *vma_m;
73290+
73291+ vma_m = pax_find_mirror_vma(vma);
73292+#endif
73293+
73294 size = vma->vm_end - address;
73295 grow = (vma->vm_start - address) >> PAGE_SHIFT;
73296
73297@@ -1810,11 +2092,22 @@ int expand_downwards(struct vm_area_struct *vma,
73298 if (!error) {
73299 vma->vm_start = address;
73300 vma->vm_pgoff -= grow;
73301+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
73302+
73303+#ifdef CONFIG_PAX_SEGMEXEC
73304+ if (vma_m) {
73305+ vma_m->vm_start -= grow << PAGE_SHIFT;
73306+ vma_m->vm_pgoff -= grow;
73307+ }
73308+#endif
73309+
73310 perf_event_mmap(vma);
73311 }
73312 }
73313 }
73314 vma_unlock_anon_vma(vma);
73315+ if (lockprev)
73316+ vma_unlock_anon_vma(prev);
73317 khugepaged_enter_vma_merge(vma);
73318 return error;
73319 }
73320@@ -1886,6 +2179,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
73321 do {
73322 long nrpages = vma_pages(vma);
73323
73324+#ifdef CONFIG_PAX_SEGMEXEC
73325+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
73326+ vma = remove_vma(vma);
73327+ continue;
73328+ }
73329+#endif
73330+
73331 if (vma->vm_flags & VM_ACCOUNT)
73332 nr_accounted += nrpages;
73333 mm->total_vm -= nrpages;
73334@@ -1932,6 +2232,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
73335 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
73336 vma->vm_prev = NULL;
73337 do {
73338+
73339+#ifdef CONFIG_PAX_SEGMEXEC
73340+ if (vma->vm_mirror) {
73341+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
73342+ vma->vm_mirror->vm_mirror = NULL;
73343+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
73344+ vma->vm_mirror = NULL;
73345+ }
73346+#endif
73347+
73348 rb_erase(&vma->vm_rb, &mm->mm_rb);
73349 mm->map_count--;
73350 tail_vma = vma;
73351@@ -1960,14 +2270,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73352 struct vm_area_struct *new;
73353 int err = -ENOMEM;
73354
73355+#ifdef CONFIG_PAX_SEGMEXEC
73356+ struct vm_area_struct *vma_m, *new_m = NULL;
73357+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
73358+#endif
73359+
73360 if (is_vm_hugetlb_page(vma) && (addr &
73361 ~(huge_page_mask(hstate_vma(vma)))))
73362 return -EINVAL;
73363
73364+#ifdef CONFIG_PAX_SEGMEXEC
73365+ vma_m = pax_find_mirror_vma(vma);
73366+#endif
73367+
73368 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73369 if (!new)
73370 goto out_err;
73371
73372+#ifdef CONFIG_PAX_SEGMEXEC
73373+ if (vma_m) {
73374+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73375+ if (!new_m) {
73376+ kmem_cache_free(vm_area_cachep, new);
73377+ goto out_err;
73378+ }
73379+ }
73380+#endif
73381+
73382 /* most fields are the same, copy all, and then fixup */
73383 *new = *vma;
73384
73385@@ -1980,6 +2309,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73386 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
73387 }
73388
73389+#ifdef CONFIG_PAX_SEGMEXEC
73390+ if (vma_m) {
73391+ *new_m = *vma_m;
73392+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
73393+ new_m->vm_mirror = new;
73394+ new->vm_mirror = new_m;
73395+
73396+ if (new_below)
73397+ new_m->vm_end = addr_m;
73398+ else {
73399+ new_m->vm_start = addr_m;
73400+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
73401+ }
73402+ }
73403+#endif
73404+
73405 pol = mpol_dup(vma_policy(vma));
73406 if (IS_ERR(pol)) {
73407 err = PTR_ERR(pol);
73408@@ -2005,6 +2350,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73409 else
73410 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
73411
73412+#ifdef CONFIG_PAX_SEGMEXEC
73413+ if (!err && vma_m) {
73414+ if (anon_vma_clone(new_m, vma_m))
73415+ goto out_free_mpol;
73416+
73417+ mpol_get(pol);
73418+ vma_set_policy(new_m, pol);
73419+
73420+ if (new_m->vm_file) {
73421+ get_file(new_m->vm_file);
73422+ if (vma_m->vm_flags & VM_EXECUTABLE)
73423+ added_exe_file_vma(mm);
73424+ }
73425+
73426+ if (new_m->vm_ops && new_m->vm_ops->open)
73427+ new_m->vm_ops->open(new_m);
73428+
73429+ if (new_below)
73430+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
73431+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
73432+ else
73433+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
73434+
73435+ if (err) {
73436+ if (new_m->vm_ops && new_m->vm_ops->close)
73437+ new_m->vm_ops->close(new_m);
73438+ if (new_m->vm_file) {
73439+ if (vma_m->vm_flags & VM_EXECUTABLE)
73440+ removed_exe_file_vma(mm);
73441+ fput(new_m->vm_file);
73442+ }
73443+ mpol_put(pol);
73444+ }
73445+ }
73446+#endif
73447+
73448 /* Success. */
73449 if (!err)
73450 return 0;
73451@@ -2017,10 +2398,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73452 removed_exe_file_vma(mm);
73453 fput(new->vm_file);
73454 }
73455- unlink_anon_vmas(new);
73456 out_free_mpol:
73457 mpol_put(pol);
73458 out_free_vma:
73459+
73460+#ifdef CONFIG_PAX_SEGMEXEC
73461+ if (new_m) {
73462+ unlink_anon_vmas(new_m);
73463+ kmem_cache_free(vm_area_cachep, new_m);
73464+ }
73465+#endif
73466+
73467+ unlink_anon_vmas(new);
73468 kmem_cache_free(vm_area_cachep, new);
73469 out_err:
73470 return err;
73471@@ -2033,6 +2422,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
73472 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73473 unsigned long addr, int new_below)
73474 {
73475+
73476+#ifdef CONFIG_PAX_SEGMEXEC
73477+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
73478+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
73479+ if (mm->map_count >= sysctl_max_map_count-1)
73480+ return -ENOMEM;
73481+ } else
73482+#endif
73483+
73484 if (mm->map_count >= sysctl_max_map_count)
73485 return -ENOMEM;
73486
73487@@ -2044,11 +2442,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73488 * work. This now handles partial unmappings.
73489 * Jeremy Fitzhardinge <jeremy@goop.org>
73490 */
73491+#ifdef CONFIG_PAX_SEGMEXEC
73492 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73493 {
73494+ int ret = __do_munmap(mm, start, len);
73495+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
73496+ return ret;
73497+
73498+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
73499+}
73500+
73501+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73502+#else
73503+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73504+#endif
73505+{
73506 unsigned long end;
73507 struct vm_area_struct *vma, *prev, *last;
73508
73509+ /*
73510+ * mm->mmap_sem is required to protect against another thread
73511+ * changing the mappings in case we sleep.
73512+ */
73513+ verify_mm_writelocked(mm);
73514+
73515 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
73516 return -EINVAL;
73517
73518@@ -2123,6 +2540,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
73519 /* Fix up all other VM information */
73520 remove_vma_list(mm, vma);
73521
73522+ track_exec_limit(mm, start, end, 0UL);
73523+
73524 return 0;
73525 }
73526
73527@@ -2131,6 +2550,13 @@ int vm_munmap(unsigned long start, size_t len)
73528 int ret;
73529 struct mm_struct *mm = current->mm;
73530
73531+
73532+#ifdef CONFIG_PAX_SEGMEXEC
73533+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
73534+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
73535+ return -EINVAL;
73536+#endif
73537+
73538 down_write(&mm->mmap_sem);
73539 ret = do_munmap(mm, start, len);
73540 up_write(&mm->mmap_sem);
73541@@ -2144,16 +2570,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
73542 return vm_munmap(addr, len);
73543 }
73544
73545-static inline void verify_mm_writelocked(struct mm_struct *mm)
73546-{
73547-#ifdef CONFIG_DEBUG_VM
73548- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
73549- WARN_ON(1);
73550- up_read(&mm->mmap_sem);
73551- }
73552-#endif
73553-}
73554-
73555 /*
73556 * this is really a simplified "do_mmap". it only handles
73557 * anonymous maps. eventually we may be able to do some
73558@@ -2167,6 +2583,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73559 struct rb_node ** rb_link, * rb_parent;
73560 pgoff_t pgoff = addr >> PAGE_SHIFT;
73561 int error;
73562+ unsigned long charged;
73563
73564 len = PAGE_ALIGN(len);
73565 if (!len)
73566@@ -2174,16 +2591,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73567
73568 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
73569
73570+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
73571+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
73572+ flags &= ~VM_EXEC;
73573+
73574+#ifdef CONFIG_PAX_MPROTECT
73575+ if (mm->pax_flags & MF_PAX_MPROTECT)
73576+ flags &= ~VM_MAYEXEC;
73577+#endif
73578+
73579+ }
73580+#endif
73581+
73582 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
73583 if (error & ~PAGE_MASK)
73584 return error;
73585
73586+ charged = len >> PAGE_SHIFT;
73587+
73588 /*
73589 * mlock MCL_FUTURE?
73590 */
73591 if (mm->def_flags & VM_LOCKED) {
73592 unsigned long locked, lock_limit;
73593- locked = len >> PAGE_SHIFT;
73594+ locked = charged;
73595 locked += mm->locked_vm;
73596 lock_limit = rlimit(RLIMIT_MEMLOCK);
73597 lock_limit >>= PAGE_SHIFT;
73598@@ -2200,22 +2631,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73599 /*
73600 * Clear old maps. this also does some error checking for us
73601 */
73602- munmap_back:
73603 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73604 if (vma && vma->vm_start < addr + len) {
73605 if (do_munmap(mm, addr, len))
73606 return -ENOMEM;
73607- goto munmap_back;
73608- }
73609+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
73610+ BUG_ON(vma && vma->vm_start < addr + len);
73611+ }
73612
73613 /* Check against address space limits *after* clearing old maps... */
73614- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
73615+ if (!may_expand_vm(mm, charged))
73616 return -ENOMEM;
73617
73618 if (mm->map_count > sysctl_max_map_count)
73619 return -ENOMEM;
73620
73621- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
73622+ if (security_vm_enough_memory_mm(mm, charged))
73623 return -ENOMEM;
73624
73625 /* Can we just expand an old private anonymous mapping? */
73626@@ -2229,7 +2660,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73627 */
73628 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73629 if (!vma) {
73630- vm_unacct_memory(len >> PAGE_SHIFT);
73631+ vm_unacct_memory(charged);
73632 return -ENOMEM;
73633 }
73634
73635@@ -2243,11 +2674,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
73636 vma_link(mm, vma, prev, rb_link, rb_parent);
73637 out:
73638 perf_event_mmap(vma);
73639- mm->total_vm += len >> PAGE_SHIFT;
73640+ mm->total_vm += charged;
73641 if (flags & VM_LOCKED) {
73642 if (!mlock_vma_pages_range(vma, addr, addr + len))
73643- mm->locked_vm += (len >> PAGE_SHIFT);
73644+ mm->locked_vm += charged;
73645 }
73646+ track_exec_limit(mm, addr, addr + len, flags);
73647 return addr;
73648 }
73649
73650@@ -2305,6 +2737,7 @@ void exit_mmap(struct mm_struct *mm)
73651 while (vma) {
73652 if (vma->vm_flags & VM_ACCOUNT)
73653 nr_accounted += vma_pages(vma);
73654+ vma->vm_mirror = NULL;
73655 vma = remove_vma(vma);
73656 }
73657 vm_unacct_memory(nr_accounted);
73658@@ -2321,6 +2754,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
73659 struct vm_area_struct * __vma, * prev;
73660 struct rb_node ** rb_link, * rb_parent;
73661
73662+#ifdef CONFIG_PAX_SEGMEXEC
73663+ struct vm_area_struct *vma_m = NULL;
73664+#endif
73665+
73666+ if (security_mmap_addr(vma->vm_start))
73667+ return -EPERM;
73668+
73669 /*
73670 * The vm_pgoff of a purely anonymous vma should be irrelevant
73671 * until its first write fault, when page's anon_vma and index
73672@@ -2347,7 +2787,21 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
73673 if (vma->vm_file && uprobe_mmap(vma))
73674 return -EINVAL;
73675
73676+#ifdef CONFIG_PAX_SEGMEXEC
73677+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
73678+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73679+ if (!vma_m)
73680+ return -ENOMEM;
73681+ }
73682+#endif
73683+
73684 vma_link(mm, vma, prev, rb_link, rb_parent);
73685+
73686+#ifdef CONFIG_PAX_SEGMEXEC
73687+ if (vma_m)
73688+ BUG_ON(pax_mirror_vma(vma_m, vma));
73689+#endif
73690+
73691 return 0;
73692 }
73693
73694@@ -2366,6 +2820,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
73695 struct mempolicy *pol;
73696 bool faulted_in_anon_vma = true;
73697
73698+ BUG_ON(vma->vm_mirror);
73699+
73700 /*
73701 * If anonymous vma has not yet been faulted, update new pgoff
73702 * to match new location, to increase its chance of merging.
73703@@ -2437,6 +2893,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
73704 return NULL;
73705 }
73706
73707+#ifdef CONFIG_PAX_SEGMEXEC
73708+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
73709+{
73710+ struct vm_area_struct *prev_m;
73711+ struct rb_node **rb_link_m, *rb_parent_m;
73712+ struct mempolicy *pol_m;
73713+
73714+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
73715+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
73716+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
73717+ *vma_m = *vma;
73718+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
73719+ if (anon_vma_clone(vma_m, vma))
73720+ return -ENOMEM;
73721+ pol_m = vma_policy(vma_m);
73722+ mpol_get(pol_m);
73723+ vma_set_policy(vma_m, pol_m);
73724+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
73725+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
73726+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
73727+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
73728+ if (vma_m->vm_file)
73729+ get_file(vma_m->vm_file);
73730+ if (vma_m->vm_ops && vma_m->vm_ops->open)
73731+ vma_m->vm_ops->open(vma_m);
73732+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
73733+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
73734+ vma_m->vm_mirror = vma;
73735+ vma->vm_mirror = vma_m;
73736+ return 0;
73737+}
73738+#endif
73739+
73740 /*
73741 * Return true if the calling process may expand its vm space by the passed
73742 * number of pages
73743@@ -2448,6 +2937,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
73744
73745 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
73746
73747+#ifdef CONFIG_PAX_RANDMMAP
73748+ if (mm->pax_flags & MF_PAX_RANDMMAP)
73749+ cur -= mm->brk_gap;
73750+#endif
73751+
73752+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
73753 if (cur + npages > lim)
73754 return 0;
73755 return 1;
73756@@ -2518,6 +3013,22 @@ int install_special_mapping(struct mm_struct *mm,
73757 vma->vm_start = addr;
73758 vma->vm_end = addr + len;
73759
73760+#ifdef CONFIG_PAX_MPROTECT
73761+ if (mm->pax_flags & MF_PAX_MPROTECT) {
73762+#ifndef CONFIG_PAX_MPROTECT_COMPAT
73763+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
73764+ return -EPERM;
73765+ if (!(vm_flags & VM_EXEC))
73766+ vm_flags &= ~VM_MAYEXEC;
73767+#else
73768+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
73769+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
73770+#endif
73771+ else
73772+ vm_flags &= ~VM_MAYWRITE;
73773+ }
73774+#endif
73775+
73776 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
73777 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
73778
73779diff --git a/mm/mprotect.c b/mm/mprotect.c
73780index a409926..8b32e6d 100644
73781--- a/mm/mprotect.c
73782+++ b/mm/mprotect.c
73783@@ -23,10 +23,17 @@
73784 #include <linux/mmu_notifier.h>
73785 #include <linux/migrate.h>
73786 #include <linux/perf_event.h>
73787+
73788+#ifdef CONFIG_PAX_MPROTECT
73789+#include <linux/elf.h>
73790+#include <linux/binfmts.h>
73791+#endif
73792+
73793 #include <asm/uaccess.h>
73794 #include <asm/pgtable.h>
73795 #include <asm/cacheflush.h>
73796 #include <asm/tlbflush.h>
73797+#include <asm/mmu_context.h>
73798
73799 #ifndef pgprot_modify
73800 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
73801@@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
73802 flush_tlb_range(vma, start, end);
73803 }
73804
73805+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
73806+/* called while holding the mmap semaphor for writing except stack expansion */
73807+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
73808+{
73809+ unsigned long oldlimit, newlimit = 0UL;
73810+
73811+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
73812+ return;
73813+
73814+ spin_lock(&mm->page_table_lock);
73815+ oldlimit = mm->context.user_cs_limit;
73816+ if ((prot & VM_EXEC) && oldlimit < end)
73817+ /* USER_CS limit moved up */
73818+ newlimit = end;
73819+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
73820+ /* USER_CS limit moved down */
73821+ newlimit = start;
73822+
73823+ if (newlimit) {
73824+ mm->context.user_cs_limit = newlimit;
73825+
73826+#ifdef CONFIG_SMP
73827+ wmb();
73828+ cpus_clear(mm->context.cpu_user_cs_mask);
73829+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
73830+#endif
73831+
73832+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
73833+ }
73834+ spin_unlock(&mm->page_table_lock);
73835+ if (newlimit == end) {
73836+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
73837+
73838+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
73839+ if (is_vm_hugetlb_page(vma))
73840+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
73841+ else
73842+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
73843+ }
73844+}
73845+#endif
73846+
73847 int
73848 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
73849 unsigned long start, unsigned long end, unsigned long newflags)
73850@@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
73851 int error;
73852 int dirty_accountable = 0;
73853
73854+#ifdef CONFIG_PAX_SEGMEXEC
73855+ struct vm_area_struct *vma_m = NULL;
73856+ unsigned long start_m, end_m;
73857+
73858+ start_m = start + SEGMEXEC_TASK_SIZE;
73859+ end_m = end + SEGMEXEC_TASK_SIZE;
73860+#endif
73861+
73862 if (newflags == oldflags) {
73863 *pprev = vma;
73864 return 0;
73865 }
73866
73867+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
73868+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
73869+
73870+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
73871+ return -ENOMEM;
73872+
73873+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
73874+ return -ENOMEM;
73875+ }
73876+
73877 /*
73878 * If we make a private mapping writable we increase our commit;
73879 * but (without finer accounting) cannot reduce our commit if we
73880@@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
73881 }
73882 }
73883
73884+#ifdef CONFIG_PAX_SEGMEXEC
73885+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
73886+ if (start != vma->vm_start) {
73887+ error = split_vma(mm, vma, start, 1);
73888+ if (error)
73889+ goto fail;
73890+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
73891+ *pprev = (*pprev)->vm_next;
73892+ }
73893+
73894+ if (end != vma->vm_end) {
73895+ error = split_vma(mm, vma, end, 0);
73896+ if (error)
73897+ goto fail;
73898+ }
73899+
73900+ if (pax_find_mirror_vma(vma)) {
73901+ error = __do_munmap(mm, start_m, end_m - start_m);
73902+ if (error)
73903+ goto fail;
73904+ } else {
73905+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73906+ if (!vma_m) {
73907+ error = -ENOMEM;
73908+ goto fail;
73909+ }
73910+ vma->vm_flags = newflags;
73911+ error = pax_mirror_vma(vma_m, vma);
73912+ if (error) {
73913+ vma->vm_flags = oldflags;
73914+ goto fail;
73915+ }
73916+ }
73917+ }
73918+#endif
73919+
73920 /*
73921 * First try to merge with previous and/or next vma.
73922 */
73923@@ -204,9 +307,21 @@ success:
73924 * vm_flags and vm_page_prot are protected by the mmap_sem
73925 * held in write mode.
73926 */
73927+
73928+#ifdef CONFIG_PAX_SEGMEXEC
73929+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
73930+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
73931+#endif
73932+
73933 vma->vm_flags = newflags;
73934+
73935+#ifdef CONFIG_PAX_MPROTECT
73936+ if (mm->binfmt && mm->binfmt->handle_mprotect)
73937+ mm->binfmt->handle_mprotect(vma, newflags);
73938+#endif
73939+
73940 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
73941- vm_get_page_prot(newflags));
73942+ vm_get_page_prot(vma->vm_flags));
73943
73944 if (vma_wants_writenotify(vma)) {
73945 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
73946@@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73947 end = start + len;
73948 if (end <= start)
73949 return -ENOMEM;
73950+
73951+#ifdef CONFIG_PAX_SEGMEXEC
73952+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
73953+ if (end > SEGMEXEC_TASK_SIZE)
73954+ return -EINVAL;
73955+ } else
73956+#endif
73957+
73958+ if (end > TASK_SIZE)
73959+ return -EINVAL;
73960+
73961 if (!arch_validate_prot(prot))
73962 return -EINVAL;
73963
73964@@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73965 /*
73966 * Does the application expect PROT_READ to imply PROT_EXEC:
73967 */
73968- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
73969+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
73970 prot |= PROT_EXEC;
73971
73972 vm_flags = calc_vm_prot_bits(prot);
73973@@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73974 if (start > vma->vm_start)
73975 prev = vma;
73976
73977+#ifdef CONFIG_PAX_MPROTECT
73978+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
73979+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
73980+#endif
73981+
73982 for (nstart = start ; ; ) {
73983 unsigned long newflags;
73984
73985@@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73986
73987 /* newflags >> 4 shift VM_MAY% in place of VM_% */
73988 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
73989+ if (prot & (PROT_WRITE | PROT_EXEC))
73990+ gr_log_rwxmprotect(vma->vm_file);
73991+
73992+ error = -EACCES;
73993+ goto out;
73994+ }
73995+
73996+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
73997 error = -EACCES;
73998 goto out;
73999 }
74000@@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
74001 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
74002 if (error)
74003 goto out;
74004+
74005+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
74006+
74007 nstart = tmp;
74008
74009 if (nstart < prev->vm_end)
74010diff --git a/mm/mremap.c b/mm/mremap.c
74011index 21fed20..6822658 100644
74012--- a/mm/mremap.c
74013+++ b/mm/mremap.c
74014@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
74015 continue;
74016 pte = ptep_get_and_clear(mm, old_addr, old_pte);
74017 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
74018+
74019+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
74020+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
74021+ pte = pte_exprotect(pte);
74022+#endif
74023+
74024 set_pte_at(mm, new_addr, new_pte, pte);
74025 }
74026
74027@@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
74028 if (is_vm_hugetlb_page(vma))
74029 goto Einval;
74030
74031+#ifdef CONFIG_PAX_SEGMEXEC
74032+ if (pax_find_mirror_vma(vma))
74033+ goto Einval;
74034+#endif
74035+
74036 /* We can't remap across vm area boundaries */
74037 if (old_len > vma->vm_end - addr)
74038 goto Efault;
74039@@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
74040 unsigned long ret = -EINVAL;
74041 unsigned long charged = 0;
74042 unsigned long map_flags;
74043+ unsigned long pax_task_size = TASK_SIZE;
74044
74045 if (new_addr & ~PAGE_MASK)
74046 goto out;
74047
74048- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
74049+#ifdef CONFIG_PAX_SEGMEXEC
74050+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
74051+ pax_task_size = SEGMEXEC_TASK_SIZE;
74052+#endif
74053+
74054+ pax_task_size -= PAGE_SIZE;
74055+
74056+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
74057 goto out;
74058
74059 /* Check if the location we're moving into overlaps the
74060 * old location at all, and fail if it does.
74061 */
74062- if ((new_addr <= addr) && (new_addr+new_len) > addr)
74063- goto out;
74064-
74065- if ((addr <= new_addr) && (addr+old_len) > new_addr)
74066+ if (addr + old_len > new_addr && new_addr + new_len > addr)
74067 goto out;
74068
74069 ret = do_munmap(mm, new_addr, new_len);
74070@@ -436,6 +452,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
74071 struct vm_area_struct *vma;
74072 unsigned long ret = -EINVAL;
74073 unsigned long charged = 0;
74074+ unsigned long pax_task_size = TASK_SIZE;
74075
74076 down_write(&current->mm->mmap_sem);
74077
74078@@ -456,6 +473,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
74079 if (!new_len)
74080 goto out;
74081
74082+#ifdef CONFIG_PAX_SEGMEXEC
74083+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
74084+ pax_task_size = SEGMEXEC_TASK_SIZE;
74085+#endif
74086+
74087+ pax_task_size -= PAGE_SIZE;
74088+
74089+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
74090+ old_len > pax_task_size || addr > pax_task_size-old_len)
74091+ goto out;
74092+
74093 if (flags & MREMAP_FIXED) {
74094 if (flags & MREMAP_MAYMOVE)
74095 ret = mremap_to(addr, old_len, new_addr, new_len);
74096@@ -505,6 +533,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
74097 addr + new_len);
74098 }
74099 ret = addr;
74100+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
74101 goto out;
74102 }
74103 }
74104@@ -528,7 +557,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
74105 goto out;
74106 }
74107
74108+ map_flags = vma->vm_flags;
74109 ret = move_vma(vma, addr, old_len, new_len, new_addr);
74110+ if (!(ret & ~PAGE_MASK)) {
74111+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
74112+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
74113+ }
74114 }
74115 out:
74116 if (ret & ~PAGE_MASK)
74117diff --git a/mm/nommu.c b/mm/nommu.c
74118index d4b0c10..ed421b5 100644
74119--- a/mm/nommu.c
74120+++ b/mm/nommu.c
74121@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
74122 int sysctl_overcommit_ratio = 50; /* default is 50% */
74123 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
74124 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
74125-int heap_stack_gap = 0;
74126
74127 atomic_long_t mmap_pages_allocated;
74128
74129@@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
74130 EXPORT_SYMBOL(find_vma);
74131
74132 /*
74133- * find a VMA
74134- * - we don't extend stack VMAs under NOMMU conditions
74135- */
74136-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
74137-{
74138- return find_vma(mm, addr);
74139-}
74140-
74141-/*
74142 * expand a stack to a given address
74143 * - not supported under NOMMU conditions
74144 */
74145@@ -1551,6 +1541,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
74146
74147 /* most fields are the same, copy all, and then fixup */
74148 *new = *vma;
74149+ INIT_LIST_HEAD(&new->anon_vma_chain);
74150 *region = *vma->vm_region;
74151 new->vm_region = region;
74152
74153diff --git a/mm/page_alloc.c b/mm/page_alloc.c
74154index 201b508..1fb51ca 100644
74155--- a/mm/page_alloc.c
74156+++ b/mm/page_alloc.c
74157@@ -336,7 +336,7 @@ out:
74158 * This usage means that zero-order pages may not be compound.
74159 */
74160
74161-static void free_compound_page(struct page *page)
74162+void free_compound_page(struct page *page)
74163 {
74164 __free_pages_ok(page, compound_order(page));
74165 }
74166@@ -693,6 +693,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
74167 int i;
74168 int bad = 0;
74169
74170+#ifdef CONFIG_PAX_MEMORY_SANITIZE
74171+ unsigned long index = 1UL << order;
74172+#endif
74173+
74174 trace_mm_page_free(page, order);
74175 kmemcheck_free_shadow(page, order);
74176
74177@@ -708,6 +712,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
74178 debug_check_no_obj_freed(page_address(page),
74179 PAGE_SIZE << order);
74180 }
74181+
74182+#ifdef CONFIG_PAX_MEMORY_SANITIZE
74183+ for (; index; --index)
74184+ sanitize_highpage(page + index - 1);
74185+#endif
74186+
74187 arch_free_page(page, order);
74188 kernel_map_pages(page, 1 << order, 0);
74189
74190@@ -849,8 +859,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
74191 arch_alloc_page(page, order);
74192 kernel_map_pages(page, 1 << order, 1);
74193
74194+#ifndef CONFIG_PAX_MEMORY_SANITIZE
74195 if (gfp_flags & __GFP_ZERO)
74196 prep_zero_page(page, order, gfp_flags);
74197+#endif
74198
74199 if (order && (gfp_flags & __GFP_COMP))
74200 prep_compound_page(page, order);
74201@@ -3579,7 +3591,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
74202 unsigned long pfn;
74203
74204 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
74205+#ifdef CONFIG_X86_32
74206+ /* boot failures in VMware 8 on 32bit vanilla since
74207+ this change */
74208+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
74209+#else
74210 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
74211+#endif
74212 return 1;
74213 }
74214 return 0;
74215diff --git a/mm/percpu.c b/mm/percpu.c
74216index bb4be74..a43ea85 100644
74217--- a/mm/percpu.c
74218+++ b/mm/percpu.c
74219@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
74220 static unsigned int pcpu_high_unit_cpu __read_mostly;
74221
74222 /* the address of the first chunk which starts with the kernel static area */
74223-void *pcpu_base_addr __read_mostly;
74224+void *pcpu_base_addr __read_only;
74225 EXPORT_SYMBOL_GPL(pcpu_base_addr);
74226
74227 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
74228diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
74229index 926b466..b23df53 100644
74230--- a/mm/process_vm_access.c
74231+++ b/mm/process_vm_access.c
74232@@ -13,6 +13,7 @@
74233 #include <linux/uio.h>
74234 #include <linux/sched.h>
74235 #include <linux/highmem.h>
74236+#include <linux/security.h>
74237 #include <linux/ptrace.h>
74238 #include <linux/slab.h>
74239 #include <linux/syscalls.h>
74240@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
74241 size_t iov_l_curr_offset = 0;
74242 ssize_t iov_len;
74243
74244+ return -ENOSYS; // PaX: until properly audited
74245+
74246 /*
74247 * Work out how many pages of struct pages we're going to need
74248 * when eventually calling get_user_pages
74249 */
74250 for (i = 0; i < riovcnt; i++) {
74251 iov_len = rvec[i].iov_len;
74252- if (iov_len > 0) {
74253- nr_pages_iov = ((unsigned long)rvec[i].iov_base
74254- + iov_len)
74255- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
74256- / PAGE_SIZE + 1;
74257- nr_pages = max(nr_pages, nr_pages_iov);
74258- }
74259+ if (iov_len <= 0)
74260+ continue;
74261+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
74262+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
74263+ nr_pages = max(nr_pages, nr_pages_iov);
74264 }
74265
74266 if (nr_pages == 0)
74267@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
74268 goto free_proc_pages;
74269 }
74270
74271+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
74272+ rc = -EPERM;
74273+ goto put_task_struct;
74274+ }
74275+
74276 mm = mm_access(task, PTRACE_MODE_ATTACH);
74277 if (!mm || IS_ERR(mm)) {
74278 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
74279diff --git a/mm/rmap.c b/mm/rmap.c
74280index 0f3b7cd..c5652b6 100644
74281--- a/mm/rmap.c
74282+++ b/mm/rmap.c
74283@@ -167,6 +167,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74284 struct anon_vma *anon_vma = vma->anon_vma;
74285 struct anon_vma_chain *avc;
74286
74287+#ifdef CONFIG_PAX_SEGMEXEC
74288+ struct anon_vma_chain *avc_m = NULL;
74289+#endif
74290+
74291 might_sleep();
74292 if (unlikely(!anon_vma)) {
74293 struct mm_struct *mm = vma->vm_mm;
74294@@ -176,6 +180,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74295 if (!avc)
74296 goto out_enomem;
74297
74298+#ifdef CONFIG_PAX_SEGMEXEC
74299+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
74300+ if (!avc_m)
74301+ goto out_enomem_free_avc;
74302+#endif
74303+
74304 anon_vma = find_mergeable_anon_vma(vma);
74305 allocated = NULL;
74306 if (!anon_vma) {
74307@@ -189,6 +199,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74308 /* page_table_lock to protect against threads */
74309 spin_lock(&mm->page_table_lock);
74310 if (likely(!vma->anon_vma)) {
74311+
74312+#ifdef CONFIG_PAX_SEGMEXEC
74313+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
74314+
74315+ if (vma_m) {
74316+ BUG_ON(vma_m->anon_vma);
74317+ vma_m->anon_vma = anon_vma;
74318+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
74319+ avc_m = NULL;
74320+ }
74321+#endif
74322+
74323 vma->anon_vma = anon_vma;
74324 anon_vma_chain_link(vma, avc, anon_vma);
74325 allocated = NULL;
74326@@ -199,12 +221,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
74327
74328 if (unlikely(allocated))
74329 put_anon_vma(allocated);
74330+
74331+#ifdef CONFIG_PAX_SEGMEXEC
74332+ if (unlikely(avc_m))
74333+ anon_vma_chain_free(avc_m);
74334+#endif
74335+
74336 if (unlikely(avc))
74337 anon_vma_chain_free(avc);
74338 }
74339 return 0;
74340
74341 out_enomem_free_avc:
74342+
74343+#ifdef CONFIG_PAX_SEGMEXEC
74344+ if (avc_m)
74345+ anon_vma_chain_free(avc_m);
74346+#endif
74347+
74348 anon_vma_chain_free(avc);
74349 out_enomem:
74350 return -ENOMEM;
74351@@ -240,7 +274,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
74352 * Attach the anon_vmas from src to dst.
74353 * Returns 0 on success, -ENOMEM on failure.
74354 */
74355-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
74356+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
74357 {
74358 struct anon_vma_chain *avc, *pavc;
74359 struct anon_vma *root = NULL;
74360@@ -318,7 +352,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
74361 * the corresponding VMA in the parent process is attached to.
74362 * Returns 0 on success, non-zero on failure.
74363 */
74364-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
74365+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
74366 {
74367 struct anon_vma_chain *avc;
74368 struct anon_vma *anon_vma;
74369diff --git a/mm/shmem.c b/mm/shmem.c
74370index bd10636..5c16d49 100644
74371--- a/mm/shmem.c
74372+++ b/mm/shmem.c
74373@@ -31,7 +31,7 @@
74374 #include <linux/export.h>
74375 #include <linux/swap.h>
74376
74377-static struct vfsmount *shm_mnt;
74378+struct vfsmount *shm_mnt;
74379
74380 #ifdef CONFIG_SHMEM
74381 /*
74382@@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
74383 #define BOGO_DIRENT_SIZE 20
74384
74385 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
74386-#define SHORT_SYMLINK_LEN 128
74387+#define SHORT_SYMLINK_LEN 64
74388
74389 struct shmem_xattr {
74390 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
74391@@ -2590,8 +2590,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
74392 int err = -ENOMEM;
74393
74394 /* Round up to L1_CACHE_BYTES to resist false sharing */
74395- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
74396- L1_CACHE_BYTES), GFP_KERNEL);
74397+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
74398 if (!sbinfo)
74399 return -ENOMEM;
74400
74401diff --git a/mm/slab.c b/mm/slab.c
74402index e901a36..9ff3f90 100644
74403--- a/mm/slab.c
74404+++ b/mm/slab.c
74405@@ -153,7 +153,7 @@
74406
74407 /* Legal flag mask for kmem_cache_create(). */
74408 #if DEBUG
74409-# define CREATE_MASK (SLAB_RED_ZONE | \
74410+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
74411 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
74412 SLAB_CACHE_DMA | \
74413 SLAB_STORE_USER | \
74414@@ -161,7 +161,7 @@
74415 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
74416 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
74417 #else
74418-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
74419+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
74420 SLAB_CACHE_DMA | \
74421 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
74422 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
74423@@ -290,7 +290,7 @@ struct kmem_list3 {
74424 * Need this for bootstrapping a per node allocator.
74425 */
74426 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
74427-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
74428+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
74429 #define CACHE_CACHE 0
74430 #define SIZE_AC MAX_NUMNODES
74431 #define SIZE_L3 (2 * MAX_NUMNODES)
74432@@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
74433 if ((x)->max_freeable < i) \
74434 (x)->max_freeable = i; \
74435 } while (0)
74436-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
74437-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
74438-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
74439-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
74440+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
74441+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
74442+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
74443+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
74444 #else
74445 #define STATS_INC_ACTIVE(x) do { } while (0)
74446 #define STATS_DEC_ACTIVE(x) do { } while (0)
74447@@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
74448 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
74449 */
74450 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
74451- const struct slab *slab, void *obj)
74452+ const struct slab *slab, const void *obj)
74453 {
74454 u32 offset = (obj - slab->s_mem);
74455 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
74456@@ -563,12 +563,13 @@ EXPORT_SYMBOL(malloc_sizes);
74457 struct cache_names {
74458 char *name;
74459 char *name_dma;
74460+ char *name_usercopy;
74461 };
74462
74463 static struct cache_names __initdata cache_names[] = {
74464-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
74465+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
74466 #include <linux/kmalloc_sizes.h>
74467- {NULL,}
74468+ {NULL}
74469 #undef CACHE
74470 };
74471
74472@@ -756,6 +757,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
74473 if (unlikely(gfpflags & GFP_DMA))
74474 return csizep->cs_dmacachep;
74475 #endif
74476+
74477+#ifdef CONFIG_PAX_USERCOPY_SLABS
74478+ if (unlikely(gfpflags & GFP_USERCOPY))
74479+ return csizep->cs_usercopycachep;
74480+#endif
74481+
74482 return csizep->cs_cachep;
74483 }
74484
74485@@ -1588,7 +1595,7 @@ void __init kmem_cache_init(void)
74486 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
74487 sizes[INDEX_AC].cs_size,
74488 ARCH_KMALLOC_MINALIGN,
74489- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74490+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74491 NULL);
74492
74493 if (INDEX_AC != INDEX_L3) {
74494@@ -1596,7 +1603,7 @@ void __init kmem_cache_init(void)
74495 kmem_cache_create(names[INDEX_L3].name,
74496 sizes[INDEX_L3].cs_size,
74497 ARCH_KMALLOC_MINALIGN,
74498- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74499+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74500 NULL);
74501 }
74502
74503@@ -1614,7 +1621,7 @@ void __init kmem_cache_init(void)
74504 sizes->cs_cachep = kmem_cache_create(names->name,
74505 sizes->cs_size,
74506 ARCH_KMALLOC_MINALIGN,
74507- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
74508+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74509 NULL);
74510 }
74511 #ifdef CONFIG_ZONE_DMA
74512@@ -1626,6 +1633,16 @@ void __init kmem_cache_init(void)
74513 SLAB_PANIC,
74514 NULL);
74515 #endif
74516+
74517+#ifdef CONFIG_PAX_USERCOPY_SLABS
74518+ sizes->cs_usercopycachep = kmem_cache_create(
74519+ names->name_usercopy,
74520+ sizes->cs_size,
74521+ ARCH_KMALLOC_MINALIGN,
74522+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
74523+ NULL);
74524+#endif
74525+
74526 sizes++;
74527 names++;
74528 }
74529@@ -4390,10 +4407,10 @@ static int s_show(struct seq_file *m, void *p)
74530 }
74531 /* cpu stats */
74532 {
74533- unsigned long allochit = atomic_read(&cachep->allochit);
74534- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
74535- unsigned long freehit = atomic_read(&cachep->freehit);
74536- unsigned long freemiss = atomic_read(&cachep->freemiss);
74537+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
74538+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
74539+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
74540+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
74541
74542 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
74543 allochit, allocmiss, freehit, freemiss);
74544@@ -4652,13 +4669,71 @@ static int __init slab_proc_init(void)
74545 {
74546 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
74547 #ifdef CONFIG_DEBUG_SLAB_LEAK
74548- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
74549+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
74550 #endif
74551 return 0;
74552 }
74553 module_init(slab_proc_init);
74554 #endif
74555
74556+bool is_usercopy_object(const void *ptr)
74557+{
74558+ struct page *page;
74559+ struct kmem_cache *cachep;
74560+
74561+ if (ZERO_OR_NULL_PTR(ptr))
74562+ return false;
74563+
74564+ if (!slab_is_available())
74565+ return false;
74566+
74567+ if (!virt_addr_valid(ptr))
74568+ return false;
74569+
74570+ page = virt_to_head_page(ptr);
74571+
74572+ if (!PageSlab(page))
74573+ return false;
74574+
74575+ cachep = page_get_cache(page);
74576+ return cachep->flags & SLAB_USERCOPY;
74577+}
74578+
74579+#ifdef CONFIG_PAX_USERCOPY
74580+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
74581+{
74582+ struct page *page;
74583+ struct kmem_cache *cachep;
74584+ struct slab *slabp;
74585+ unsigned int objnr;
74586+ unsigned long offset;
74587+
74588+ if (ZERO_OR_NULL_PTR(ptr))
74589+ return "<null>";
74590+
74591+ if (!virt_addr_valid(ptr))
74592+ return NULL;
74593+
74594+ page = virt_to_head_page(ptr);
74595+
74596+ if (!PageSlab(page))
74597+ return NULL;
74598+
74599+ cachep = page_get_cache(page);
74600+ if (!(cachep->flags & SLAB_USERCOPY))
74601+ return cachep->name;
74602+
74603+ slabp = page_get_slab(page);
74604+ objnr = obj_to_index(cachep, slabp, ptr);
74605+ BUG_ON(objnr >= cachep->num);
74606+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
74607+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
74608+ return NULL;
74609+
74610+ return cachep->name;
74611+}
74612+#endif
74613+
74614 /**
74615 * ksize - get the actual amount of memory allocated for a given object
74616 * @objp: Pointer to the object
74617diff --git a/mm/slob.c b/mm/slob.c
74618index 8105be4..33e52d7 100644
74619--- a/mm/slob.c
74620+++ b/mm/slob.c
74621@@ -29,7 +29,7 @@
74622 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
74623 * alloc_pages() directly, allocating compound pages so the page order
74624 * does not have to be separately tracked, and also stores the exact
74625- * allocation size in page->private so that it can be used to accurately
74626+ * allocation size in slob_page->size so that it can be used to accurately
74627 * provide ksize(). These objects are detected in kfree() because slob_page()
74628 * is false for them.
74629 *
74630@@ -58,6 +58,7 @@
74631 */
74632
74633 #include <linux/kernel.h>
74634+#include <linux/sched.h>
74635 #include <linux/slab.h>
74636 #include <linux/mm.h>
74637 #include <linux/swap.h> /* struct reclaim_state */
74638@@ -100,9 +101,8 @@ struct slob_page {
74639 union {
74640 struct {
74641 unsigned long flags; /* mandatory */
74642- atomic_t _count; /* mandatory */
74643 slobidx_t units; /* free units left in page */
74644- unsigned long pad[2];
74645+ unsigned long size; /* size when >=PAGE_SIZE */
74646 slob_t *free; /* first free slob_t in page */
74647 struct list_head list; /* linked list of free pages */
74648 };
74649@@ -135,7 +135,7 @@ static LIST_HEAD(free_slob_large);
74650 */
74651 static inline int is_slob_page(struct slob_page *sp)
74652 {
74653- return PageSlab((struct page *)sp);
74654+ return PageSlab((struct page *)sp) && !sp->size;
74655 }
74656
74657 static inline void set_slob_page(struct slob_page *sp)
74658@@ -150,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
74659
74660 static inline struct slob_page *slob_page(const void *addr)
74661 {
74662- return (struct slob_page *)virt_to_page(addr);
74663+ return (struct slob_page *)virt_to_head_page(addr);
74664 }
74665
74666 /*
74667@@ -210,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
74668 /*
74669 * Return the size of a slob block.
74670 */
74671-static slobidx_t slob_units(slob_t *s)
74672+static slobidx_t slob_units(const slob_t *s)
74673 {
74674 if (s->units > 0)
74675 return s->units;
74676@@ -220,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
74677 /*
74678 * Return the next free slob block pointer after this one.
74679 */
74680-static slob_t *slob_next(slob_t *s)
74681+static slob_t *slob_next(const slob_t *s)
74682 {
74683 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
74684 slobidx_t next;
74685@@ -235,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
74686 /*
74687 * Returns true if s is the last free block in its page.
74688 */
74689-static int slob_last(slob_t *s)
74690+static int slob_last(const slob_t *s)
74691 {
74692 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
74693 }
74694@@ -254,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
74695 if (!page)
74696 return NULL;
74697
74698+ set_slob_page(page);
74699 return page_address(page);
74700 }
74701
74702@@ -370,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
74703 if (!b)
74704 return NULL;
74705 sp = slob_page(b);
74706- set_slob_page(sp);
74707
74708 spin_lock_irqsave(&slob_lock, flags);
74709 sp->units = SLOB_UNITS(PAGE_SIZE);
74710 sp->free = b;
74711+ sp->size = 0;
74712 INIT_LIST_HEAD(&sp->list);
74713 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
74714 set_slob_page_free(sp, slob_list);
74715@@ -476,10 +477,9 @@ out:
74716 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
74717 */
74718
74719-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74720+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
74721 {
74722- unsigned int *m;
74723- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74724+ slob_t *m;
74725 void *ret;
74726
74727 gfp &= gfp_allowed_mask;
74728@@ -494,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74729
74730 if (!m)
74731 return NULL;
74732- *m = size;
74733+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
74734+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
74735+ m[0].units = size;
74736+ m[1].units = align;
74737 ret = (void *)m + align;
74738
74739 trace_kmalloc_node(_RET_IP_, ret,
74740@@ -506,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74741 gfp |= __GFP_COMP;
74742 ret = slob_new_pages(gfp, order, node);
74743 if (ret) {
74744- struct page *page;
74745- page = virt_to_page(ret);
74746- page->private = size;
74747+ struct slob_page *sp;
74748+ sp = slob_page(ret);
74749+ sp->size = size;
74750 }
74751
74752 trace_kmalloc_node(_RET_IP_, ret,
74753 size, PAGE_SIZE << order, gfp, node);
74754 }
74755
74756- kmemleak_alloc(ret, size, 1, gfp);
74757+ return ret;
74758+}
74759+
74760+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74761+{
74762+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74763+ void *ret = __kmalloc_node_align(size, gfp, node, align);
74764+
74765+ if (!ZERO_OR_NULL_PTR(ret))
74766+ kmemleak_alloc(ret, size, 1, gfp);
74767 return ret;
74768 }
74769 EXPORT_SYMBOL(__kmalloc_node);
74770@@ -533,13 +545,88 @@ void kfree(const void *block)
74771 sp = slob_page(block);
74772 if (is_slob_page(sp)) {
74773 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74774- unsigned int *m = (unsigned int *)(block - align);
74775- slob_free(m, *m + align);
74776- } else
74777+ slob_t *m = (slob_t *)(block - align);
74778+ slob_free(m, m[0].units + align);
74779+ } else {
74780+ clear_slob_page(sp);
74781+ free_slob_page(sp);
74782+ sp->size = 0;
74783 put_page(&sp->page);
74784+ }
74785 }
74786 EXPORT_SYMBOL(kfree);
74787
74788+bool is_usercopy_object(const void *ptr)
74789+{
74790+ if (!slab_is_available())
74791+ return false;
74792+
74793+ // PAX: TODO
74794+
74795+ return false;
74796+}
74797+
74798+#ifdef CONFIG_PAX_USERCOPY
74799+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
74800+{
74801+ struct slob_page *sp;
74802+ const slob_t *free;
74803+ const void *base;
74804+ unsigned long flags;
74805+
74806+ if (ZERO_OR_NULL_PTR(ptr))
74807+ return "<null>";
74808+
74809+ if (!virt_addr_valid(ptr))
74810+ return NULL;
74811+
74812+ sp = slob_page(ptr);
74813+ if (!PageSlab((struct page *)sp))
74814+ return NULL;
74815+
74816+ if (sp->size) {
74817+ base = page_address(&sp->page);
74818+ if (base <= ptr && n <= sp->size - (ptr - base))
74819+ return NULL;
74820+ return "<slob>";
74821+ }
74822+
74823+ /* some tricky double walking to find the chunk */
74824+ spin_lock_irqsave(&slob_lock, flags);
74825+ base = (void *)((unsigned long)ptr & PAGE_MASK);
74826+ free = sp->free;
74827+
74828+ while ((void *)free <= ptr) {
74829+ base = free + slob_units(free);
74830+ free = slob_next(free);
74831+ }
74832+
74833+ while (base < (void *)free) {
74834+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
74835+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
74836+ int offset;
74837+
74838+ if (ptr < base + align)
74839+ break;
74840+
74841+ offset = ptr - base - align;
74842+ if (offset >= m) {
74843+ base += size;
74844+ continue;
74845+ }
74846+
74847+ if (n > m - offset)
74848+ break;
74849+
74850+ spin_unlock_irqrestore(&slob_lock, flags);
74851+ return NULL;
74852+ }
74853+
74854+ spin_unlock_irqrestore(&slob_lock, flags);
74855+ return "<slob>";
74856+}
74857+#endif
74858+
74859 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
74860 size_t ksize(const void *block)
74861 {
74862@@ -552,10 +639,10 @@ size_t ksize(const void *block)
74863 sp = slob_page(block);
74864 if (is_slob_page(sp)) {
74865 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74866- unsigned int *m = (unsigned int *)(block - align);
74867- return SLOB_UNITS(*m) * SLOB_UNIT;
74868+ slob_t *m = (slob_t *)(block - align);
74869+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
74870 } else
74871- return sp->page.private;
74872+ return sp->size;
74873 }
74874 EXPORT_SYMBOL(ksize);
74875
74876@@ -571,8 +658,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
74877 {
74878 struct kmem_cache *c;
74879
74880+#ifdef CONFIG_PAX_USERCOPY_SLABS
74881+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
74882+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
74883+#else
74884 c = slob_alloc(sizeof(struct kmem_cache),
74885 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
74886+#endif
74887
74888 if (c) {
74889 c->name = name;
74890@@ -614,17 +706,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
74891
74892 lockdep_trace_alloc(flags);
74893
74894+#ifdef CONFIG_PAX_USERCOPY_SLABS
74895+ b = __kmalloc_node_align(c->size, flags, node, c->align);
74896+#else
74897 if (c->size < PAGE_SIZE) {
74898 b = slob_alloc(c->size, flags, c->align, node);
74899 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
74900 SLOB_UNITS(c->size) * SLOB_UNIT,
74901 flags, node);
74902 } else {
74903+ struct slob_page *sp;
74904+
74905 b = slob_new_pages(flags, get_order(c->size), node);
74906+ sp = slob_page(b);
74907+ sp->size = c->size;
74908 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
74909 PAGE_SIZE << get_order(c->size),
74910 flags, node);
74911 }
74912+#endif
74913
74914 if (c->ctor)
74915 c->ctor(b);
74916@@ -636,10 +736,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
74917
74918 static void __kmem_cache_free(void *b, int size)
74919 {
74920- if (size < PAGE_SIZE)
74921+ struct slob_page *sp = slob_page(b);
74922+
74923+ if (is_slob_page(sp))
74924 slob_free(b, size);
74925- else
74926+ else {
74927+ clear_slob_page(sp);
74928+ free_slob_page(sp);
74929+ sp->size = 0;
74930 slob_free_pages(b, get_order(size));
74931+ }
74932 }
74933
74934 static void kmem_rcu_free(struct rcu_head *head)
74935@@ -652,17 +758,31 @@ static void kmem_rcu_free(struct rcu_head *head)
74936
74937 void kmem_cache_free(struct kmem_cache *c, void *b)
74938 {
74939+ int size = c->size;
74940+
74941+#ifdef CONFIG_PAX_USERCOPY_SLABS
74942+ if (size + c->align < PAGE_SIZE) {
74943+ size += c->align;
74944+ b -= c->align;
74945+ }
74946+#endif
74947+
74948 kmemleak_free_recursive(b, c->flags);
74949 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
74950 struct slob_rcu *slob_rcu;
74951- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
74952- slob_rcu->size = c->size;
74953+ slob_rcu = b + (size - sizeof(struct slob_rcu));
74954+ slob_rcu->size = size;
74955 call_rcu(&slob_rcu->head, kmem_rcu_free);
74956 } else {
74957- __kmem_cache_free(b, c->size);
74958+ __kmem_cache_free(b, size);
74959 }
74960
74961+#ifdef CONFIG_PAX_USERCOPY_SLABS
74962+ trace_kfree(_RET_IP_, b);
74963+#else
74964 trace_kmem_cache_free(_RET_IP_, b);
74965+#endif
74966+
74967 }
74968 EXPORT_SYMBOL(kmem_cache_free);
74969
74970diff --git a/mm/slub.c b/mm/slub.c
74971index 8c691fa..2993c2b 100644
74972--- a/mm/slub.c
74973+++ b/mm/slub.c
74974@@ -209,7 +209,7 @@ struct track {
74975
74976 enum track_item { TRACK_ALLOC, TRACK_FREE };
74977
74978-#ifdef CONFIG_SYSFS
74979+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74980 static int sysfs_slab_add(struct kmem_cache *);
74981 static int sysfs_slab_alias(struct kmem_cache *, const char *);
74982 static void sysfs_slab_remove(struct kmem_cache *);
74983@@ -538,7 +538,7 @@ static void print_track(const char *s, struct track *t)
74984 if (!t->addr)
74985 return;
74986
74987- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
74988+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
74989 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
74990 #ifdef CONFIG_STACKTRACE
74991 {
74992@@ -2603,6 +2603,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
74993
74994 page = virt_to_head_page(x);
74995
74996+ BUG_ON(!PageSlab(page));
74997+
74998 slab_free(s, page, x, _RET_IP_);
74999
75000 trace_kmem_cache_free(_RET_IP_, x);
75001@@ -2636,7 +2638,7 @@ static int slub_min_objects;
75002 * Merge control. If this is set then no merging of slab caches will occur.
75003 * (Could be removed. This was introduced to pacify the merge skeptics.)
75004 */
75005-static int slub_nomerge;
75006+static int slub_nomerge = 1;
75007
75008 /*
75009 * Calculate the order of allocation given an slab object size.
75010@@ -3089,7 +3091,7 @@ static int kmem_cache_open(struct kmem_cache *s,
75011 else
75012 s->cpu_partial = 30;
75013
75014- s->refcount = 1;
75015+ atomic_set(&s->refcount, 1);
75016 #ifdef CONFIG_NUMA
75017 s->remote_node_defrag_ratio = 1000;
75018 #endif
75019@@ -3193,8 +3195,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
75020 void kmem_cache_destroy(struct kmem_cache *s)
75021 {
75022 down_write(&slub_lock);
75023- s->refcount--;
75024- if (!s->refcount) {
75025+ if (atomic_dec_and_test(&s->refcount)) {
75026 list_del(&s->list);
75027 up_write(&slub_lock);
75028 if (kmem_cache_close(s)) {
75029@@ -3223,6 +3224,10 @@ static struct kmem_cache *kmem_cache;
75030 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
75031 #endif
75032
75033+#ifdef CONFIG_PAX_USERCOPY_SLABS
75034+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
75035+#endif
75036+
75037 static int __init setup_slub_min_order(char *str)
75038 {
75039 get_option(&str, &slub_min_order);
75040@@ -3337,6 +3342,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
75041 return kmalloc_dma_caches[index];
75042
75043 #endif
75044+
75045+#ifdef CONFIG_PAX_USERCOPY_SLABS
75046+ if (flags & SLAB_USERCOPY)
75047+ return kmalloc_usercopy_caches[index];
75048+
75049+#endif
75050+
75051 return kmalloc_caches[index];
75052 }
75053
75054@@ -3405,6 +3417,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
75055 EXPORT_SYMBOL(__kmalloc_node);
75056 #endif
75057
75058+bool is_usercopy_object(const void *ptr)
75059+{
75060+ struct page *page;
75061+ struct kmem_cache *s;
75062+
75063+ if (ZERO_OR_NULL_PTR(ptr))
75064+ return false;
75065+
75066+ if (!slab_is_available())
75067+ return false;
75068+
75069+ if (!virt_addr_valid(ptr))
75070+ return false;
75071+
75072+ page = virt_to_head_page(ptr);
75073+
75074+ if (!PageSlab(page))
75075+ return false;
75076+
75077+ s = page->slab;
75078+ return s->flags & SLAB_USERCOPY;
75079+}
75080+
75081+#ifdef CONFIG_PAX_USERCOPY
75082+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
75083+{
75084+ struct page *page;
75085+ struct kmem_cache *s;
75086+ unsigned long offset;
75087+
75088+ if (ZERO_OR_NULL_PTR(ptr))
75089+ return "<null>";
75090+
75091+ if (!virt_addr_valid(ptr))
75092+ return NULL;
75093+
75094+ page = virt_to_head_page(ptr);
75095+
75096+ if (!PageSlab(page))
75097+ return NULL;
75098+
75099+ s = page->slab;
75100+ if (!(s->flags & SLAB_USERCOPY))
75101+ return s->name;
75102+
75103+ offset = (ptr - page_address(page)) % s->size;
75104+ if (offset <= s->objsize && n <= s->objsize - offset)
75105+ return NULL;
75106+
75107+ return s->name;
75108+}
75109+#endif
75110+
75111 size_t ksize(const void *object)
75112 {
75113 struct page *page;
75114@@ -3679,7 +3744,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
75115 int node;
75116
75117 list_add(&s->list, &slab_caches);
75118- s->refcount = -1;
75119+ atomic_set(&s->refcount, -1);
75120
75121 for_each_node_state(node, N_NORMAL_MEMORY) {
75122 struct kmem_cache_node *n = get_node(s, node);
75123@@ -3799,17 +3864,17 @@ void __init kmem_cache_init(void)
75124
75125 /* Caches that are not of the two-to-the-power-of size */
75126 if (KMALLOC_MIN_SIZE <= 32) {
75127- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
75128+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
75129 caches++;
75130 }
75131
75132 if (KMALLOC_MIN_SIZE <= 64) {
75133- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
75134+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
75135 caches++;
75136 }
75137
75138 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
75139- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
75140+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
75141 caches++;
75142 }
75143
75144@@ -3851,6 +3916,22 @@ void __init kmem_cache_init(void)
75145 }
75146 }
75147 #endif
75148+
75149+#ifdef CONFIG_PAX_USERCOPY_SLABS
75150+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
75151+ struct kmem_cache *s = kmalloc_caches[i];
75152+
75153+ if (s && s->size) {
75154+ char *name = kasprintf(GFP_NOWAIT,
75155+ "usercopy-kmalloc-%d", s->objsize);
75156+
75157+ BUG_ON(!name);
75158+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
75159+ s->objsize, SLAB_USERCOPY);
75160+ }
75161+ }
75162+#endif
75163+
75164 printk(KERN_INFO
75165 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
75166 " CPUs=%d, Nodes=%d\n",
75167@@ -3877,7 +3958,7 @@ static int slab_unmergeable(struct kmem_cache *s)
75168 /*
75169 * We may have set a slab to be unmergeable during bootstrap.
75170 */
75171- if (s->refcount < 0)
75172+ if (atomic_read(&s->refcount) < 0)
75173 return 1;
75174
75175 return 0;
75176@@ -3936,7 +4017,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
75177 down_write(&slub_lock);
75178 s = find_mergeable(size, align, flags, name, ctor);
75179 if (s) {
75180- s->refcount++;
75181+ atomic_inc(&s->refcount);
75182 /*
75183 * Adjust the object sizes so that we clear
75184 * the complete object on kzalloc.
75185@@ -3945,7 +4026,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
75186 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
75187
75188 if (sysfs_slab_alias(s, name)) {
75189- s->refcount--;
75190+ atomic_dec(&s->refcount);
75191 goto err;
75192 }
75193 up_write(&slub_lock);
75194@@ -4074,7 +4155,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
75195 }
75196 #endif
75197
75198-#ifdef CONFIG_SYSFS
75199+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75200 static int count_inuse(struct page *page)
75201 {
75202 return page->inuse;
75203@@ -4461,12 +4542,12 @@ static void resiliency_test(void)
75204 validate_slab_cache(kmalloc_caches[9]);
75205 }
75206 #else
75207-#ifdef CONFIG_SYSFS
75208+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75209 static void resiliency_test(void) {};
75210 #endif
75211 #endif
75212
75213-#ifdef CONFIG_SYSFS
75214+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75215 enum slab_stat_type {
75216 SL_ALL, /* All slabs */
75217 SL_PARTIAL, /* Only partially allocated slabs */
75218@@ -4709,7 +4790,7 @@ SLAB_ATTR_RO(ctor);
75219
75220 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
75221 {
75222- return sprintf(buf, "%d\n", s->refcount - 1);
75223+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
75224 }
75225 SLAB_ATTR_RO(aliases);
75226
75227@@ -5280,6 +5361,7 @@ static char *create_unique_id(struct kmem_cache *s)
75228 return name;
75229 }
75230
75231+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75232 static int sysfs_slab_add(struct kmem_cache *s)
75233 {
75234 int err;
75235@@ -5342,6 +5424,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
75236 kobject_del(&s->kobj);
75237 kobject_put(&s->kobj);
75238 }
75239+#endif
75240
75241 /*
75242 * Need to buffer aliases during bootup until sysfs becomes
75243@@ -5355,6 +5438,7 @@ struct saved_alias {
75244
75245 static struct saved_alias *alias_list;
75246
75247+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
75248 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
75249 {
75250 struct saved_alias *al;
75251@@ -5377,6 +5461,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
75252 alias_list = al;
75253 return 0;
75254 }
75255+#endif
75256
75257 static int __init slab_sysfs_init(void)
75258 {
75259diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
75260index 1b7e22a..3fcd4f3 100644
75261--- a/mm/sparse-vmemmap.c
75262+++ b/mm/sparse-vmemmap.c
75263@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
75264 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
75265 if (!p)
75266 return NULL;
75267- pud_populate(&init_mm, pud, p);
75268+ pud_populate_kernel(&init_mm, pud, p);
75269 }
75270 return pud;
75271 }
75272@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
75273 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
75274 if (!p)
75275 return NULL;
75276- pgd_populate(&init_mm, pgd, p);
75277+ pgd_populate_kernel(&init_mm, pgd, p);
75278 }
75279 return pgd;
75280 }
75281diff --git a/mm/swap.c b/mm/swap.c
75282index 4e7e2ec..0c57830 100644
75283--- a/mm/swap.c
75284+++ b/mm/swap.c
75285@@ -30,6 +30,7 @@
75286 #include <linux/backing-dev.h>
75287 #include <linux/memcontrol.h>
75288 #include <linux/gfp.h>
75289+#include <linux/hugetlb.h>
75290
75291 #include "internal.h"
75292
75293@@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
75294
75295 __page_cache_release(page);
75296 dtor = get_compound_page_dtor(page);
75297+ if (!PageHuge(page))
75298+ BUG_ON(dtor != free_compound_page);
75299 (*dtor)(page);
75300 }
75301
75302diff --git a/mm/swapfile.c b/mm/swapfile.c
75303index 71373d0..11fa7d9 100644
75304--- a/mm/swapfile.c
75305+++ b/mm/swapfile.c
75306@@ -63,7 +63,7 @@ static DEFINE_MUTEX(swapon_mutex);
75307
75308 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
75309 /* Activity counter to indicate that a swapon or swapoff has occurred */
75310-static atomic_t proc_poll_event = ATOMIC_INIT(0);
75311+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
75312
75313 static inline unsigned char swap_count(unsigned char ent)
75314 {
75315@@ -1663,7 +1663,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
75316 }
75317 filp_close(swap_file, NULL);
75318 err = 0;
75319- atomic_inc(&proc_poll_event);
75320+ atomic_inc_unchecked(&proc_poll_event);
75321 wake_up_interruptible(&proc_poll_wait);
75322
75323 out_dput:
75324@@ -1679,8 +1679,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
75325
75326 poll_wait(file, &proc_poll_wait, wait);
75327
75328- if (seq->poll_event != atomic_read(&proc_poll_event)) {
75329- seq->poll_event = atomic_read(&proc_poll_event);
75330+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
75331+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
75332 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
75333 }
75334
75335@@ -1778,7 +1778,7 @@ static int swaps_open(struct inode *inode, struct file *file)
75336 return ret;
75337
75338 seq = file->private_data;
75339- seq->poll_event = atomic_read(&proc_poll_event);
75340+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
75341 return 0;
75342 }
75343
75344@@ -2120,7 +2120,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
75345 (frontswap_map) ? "FS" : "");
75346
75347 mutex_unlock(&swapon_mutex);
75348- atomic_inc(&proc_poll_event);
75349+ atomic_inc_unchecked(&proc_poll_event);
75350 wake_up_interruptible(&proc_poll_wait);
75351
75352 if (S_ISREG(inode->i_mode))
75353diff --git a/mm/util.c b/mm/util.c
75354index 8c7265a..c96d884 100644
75355--- a/mm/util.c
75356+++ b/mm/util.c
75357@@ -285,6 +285,12 @@ done:
75358 void arch_pick_mmap_layout(struct mm_struct *mm)
75359 {
75360 mm->mmap_base = TASK_UNMAPPED_BASE;
75361+
75362+#ifdef CONFIG_PAX_RANDMMAP
75363+ if (mm->pax_flags & MF_PAX_RANDMMAP)
75364+ mm->mmap_base += mm->delta_mmap;
75365+#endif
75366+
75367 mm->get_unmapped_area = arch_get_unmapped_area;
75368 mm->unmap_area = arch_unmap_area;
75369 }
75370diff --git a/mm/vmalloc.c b/mm/vmalloc.c
75371index 2aad499..4006a74 100644
75372--- a/mm/vmalloc.c
75373+++ b/mm/vmalloc.c
75374@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
75375
75376 pte = pte_offset_kernel(pmd, addr);
75377 do {
75378- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
75379- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
75380+
75381+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75382+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
75383+ BUG_ON(!pte_exec(*pte));
75384+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
75385+ continue;
75386+ }
75387+#endif
75388+
75389+ {
75390+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
75391+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
75392+ }
75393 } while (pte++, addr += PAGE_SIZE, addr != end);
75394 }
75395
75396@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
75397 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
75398 {
75399 pte_t *pte;
75400+ int ret = -ENOMEM;
75401
75402 /*
75403 * nr is a running index into the array which helps higher level
75404@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
75405 pte = pte_alloc_kernel(pmd, addr);
75406 if (!pte)
75407 return -ENOMEM;
75408+
75409+ pax_open_kernel();
75410 do {
75411 struct page *page = pages[*nr];
75412
75413- if (WARN_ON(!pte_none(*pte)))
75414- return -EBUSY;
75415- if (WARN_ON(!page))
75416- return -ENOMEM;
75417+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75418+ if (pgprot_val(prot) & _PAGE_NX)
75419+#endif
75420+
75421+ if (WARN_ON(!pte_none(*pte))) {
75422+ ret = -EBUSY;
75423+ goto out;
75424+ }
75425+ if (WARN_ON(!page)) {
75426+ ret = -ENOMEM;
75427+ goto out;
75428+ }
75429 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
75430 (*nr)++;
75431 } while (pte++, addr += PAGE_SIZE, addr != end);
75432- return 0;
75433+ ret = 0;
75434+out:
75435+ pax_close_kernel();
75436+ return ret;
75437 }
75438
75439 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
75440@@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
75441 pmd_t *pmd;
75442 unsigned long next;
75443
75444- pmd = pmd_alloc(&init_mm, pud, addr);
75445+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
75446 if (!pmd)
75447 return -ENOMEM;
75448 do {
75449@@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
75450 pud_t *pud;
75451 unsigned long next;
75452
75453- pud = pud_alloc(&init_mm, pgd, addr);
75454+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
75455 if (!pud)
75456 return -ENOMEM;
75457 do {
75458@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
75459 * and fall back on vmalloc() if that fails. Others
75460 * just put it in the vmalloc space.
75461 */
75462-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
75463+#ifdef CONFIG_MODULES
75464+#ifdef MODULES_VADDR
75465 unsigned long addr = (unsigned long)x;
75466 if (addr >= MODULES_VADDR && addr < MODULES_END)
75467 return 1;
75468 #endif
75469+
75470+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
75471+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
75472+ return 1;
75473+#endif
75474+
75475+#endif
75476+
75477 return is_vmalloc_addr(x);
75478 }
75479
75480@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
75481
75482 if (!pgd_none(*pgd)) {
75483 pud_t *pud = pud_offset(pgd, addr);
75484+#ifdef CONFIG_X86
75485+ if (!pud_large(*pud))
75486+#endif
75487 if (!pud_none(*pud)) {
75488 pmd_t *pmd = pmd_offset(pud, addr);
75489+#ifdef CONFIG_X86
75490+ if (!pmd_large(*pmd))
75491+#endif
75492 if (!pmd_none(*pmd)) {
75493 pte_t *ptep, pte;
75494
75495@@ -329,7 +369,7 @@ static void purge_vmap_area_lazy(void);
75496 * Allocate a region of KVA of the specified size and alignment, within the
75497 * vstart and vend.
75498 */
75499-static struct vmap_area *alloc_vmap_area(unsigned long size,
75500+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
75501 unsigned long align,
75502 unsigned long vstart, unsigned long vend,
75503 int node, gfp_t gfp_mask)
75504@@ -1320,6 +1360,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
75505 struct vm_struct *area;
75506
75507 BUG_ON(in_interrupt());
75508+
75509+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75510+ if (flags & VM_KERNEXEC) {
75511+ if (start != VMALLOC_START || end != VMALLOC_END)
75512+ return NULL;
75513+ start = (unsigned long)MODULES_EXEC_VADDR;
75514+ end = (unsigned long)MODULES_EXEC_END;
75515+ }
75516+#endif
75517+
75518 if (flags & VM_IOREMAP) {
75519 int bit = fls(size);
75520
75521@@ -1552,6 +1602,11 @@ void *vmap(struct page **pages, unsigned int count,
75522 if (count > totalram_pages)
75523 return NULL;
75524
75525+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75526+ if (!(pgprot_val(prot) & _PAGE_NX))
75527+ flags |= VM_KERNEXEC;
75528+#endif
75529+
75530 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
75531 __builtin_return_address(0));
75532 if (!area)
75533@@ -1653,6 +1708,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
75534 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
75535 goto fail;
75536
75537+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
75538+ if (!(pgprot_val(prot) & _PAGE_NX))
75539+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
75540+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
75541+ else
75542+#endif
75543+
75544 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
75545 start, end, node, gfp_mask, caller);
75546 if (!area)
75547@@ -1826,10 +1888,9 @@ EXPORT_SYMBOL(vzalloc_node);
75548 * For tight control over page level allocator and protection flags
75549 * use __vmalloc() instead.
75550 */
75551-
75552 void *vmalloc_exec(unsigned long size)
75553 {
75554- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
75555+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
75556 -1, __builtin_return_address(0));
75557 }
75558
75559@@ -2124,6 +2185,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
75560 unsigned long uaddr = vma->vm_start;
75561 unsigned long usize = vma->vm_end - vma->vm_start;
75562
75563+ BUG_ON(vma->vm_mirror);
75564+
75565 if ((PAGE_SIZE-1) & (unsigned long)addr)
75566 return -EINVAL;
75567
75568diff --git a/mm/vmstat.c b/mm/vmstat.c
75569index 1bbbbd9..ff35669 100644
75570--- a/mm/vmstat.c
75571+++ b/mm/vmstat.c
75572@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
75573 *
75574 * vm_stat contains the global counters
75575 */
75576-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
75577+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
75578 EXPORT_SYMBOL(vm_stat);
75579
75580 #ifdef CONFIG_SMP
75581@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
75582 v = p->vm_stat_diff[i];
75583 p->vm_stat_diff[i] = 0;
75584 local_irq_restore(flags);
75585- atomic_long_add(v, &zone->vm_stat[i]);
75586+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
75587 global_diff[i] += v;
75588 #ifdef CONFIG_NUMA
75589 /* 3 seconds idle till flush */
75590@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
75591
75592 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
75593 if (global_diff[i])
75594- atomic_long_add(global_diff[i], &vm_stat[i]);
75595+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
75596 }
75597
75598 #endif
75599@@ -1211,10 +1211,20 @@ static int __init setup_vmstat(void)
75600 start_cpu_timer(cpu);
75601 #endif
75602 #ifdef CONFIG_PROC_FS
75603- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
75604- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
75605- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
75606- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
75607+ {
75608+ mode_t gr_mode = S_IRUGO;
75609+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75610+ gr_mode = S_IRUSR;
75611+#endif
75612+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
75613+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
75614+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
75615+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
75616+#else
75617+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
75618+#endif
75619+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
75620+ }
75621 #endif
75622 return 0;
75623 }
75624diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
75625index 9096bcb..43ed7bb 100644
75626--- a/net/8021q/vlan.c
75627+++ b/net/8021q/vlan.c
75628@@ -557,8 +557,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
75629 err = -EPERM;
75630 if (!capable(CAP_NET_ADMIN))
75631 break;
75632- if ((args.u.name_type >= 0) &&
75633- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
75634+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
75635 struct vlan_net *vn;
75636
75637 vn = net_generic(net, vlan_net_id);
75638diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
75639index 6449bae..8c1f454 100644
75640--- a/net/9p/trans_fd.c
75641+++ b/net/9p/trans_fd.c
75642@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
75643 oldfs = get_fs();
75644 set_fs(get_ds());
75645 /* The cast to a user pointer is valid due to the set_fs() */
75646- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
75647+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
75648 set_fs(oldfs);
75649
75650 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
75651diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
75652index 876fbe8..8bbea9f 100644
75653--- a/net/atm/atm_misc.c
75654+++ b/net/atm/atm_misc.c
75655@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
75656 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
75657 return 1;
75658 atm_return(vcc, truesize);
75659- atomic_inc(&vcc->stats->rx_drop);
75660+ atomic_inc_unchecked(&vcc->stats->rx_drop);
75661 return 0;
75662 }
75663 EXPORT_SYMBOL(atm_charge);
75664@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
75665 }
75666 }
75667 atm_return(vcc, guess);
75668- atomic_inc(&vcc->stats->rx_drop);
75669+ atomic_inc_unchecked(&vcc->stats->rx_drop);
75670 return NULL;
75671 }
75672 EXPORT_SYMBOL(atm_alloc_charge);
75673@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
75674
75675 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
75676 {
75677-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
75678+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
75679 __SONET_ITEMS
75680 #undef __HANDLE_ITEM
75681 }
75682@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
75683
75684 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
75685 {
75686-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
75687+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
75688 __SONET_ITEMS
75689 #undef __HANDLE_ITEM
75690 }
75691diff --git a/net/atm/common.c b/net/atm/common.c
75692index b4b44db..0c0ad93 100644
75693--- a/net/atm/common.c
75694+++ b/net/atm/common.c
75695@@ -812,6 +812,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
75696
75697 if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
75698 return -ENOTCONN;
75699+ memset(&pvc, 0, sizeof(pvc));
75700 pvc.sap_family = AF_ATMPVC;
75701 pvc.sap_addr.itf = vcc->dev->number;
75702 pvc.sap_addr.vpi = vcc->vpi;
75703diff --git a/net/atm/lec.h b/net/atm/lec.h
75704index a86aff9..3a0d6f6 100644
75705--- a/net/atm/lec.h
75706+++ b/net/atm/lec.h
75707@@ -48,7 +48,7 @@ struct lane2_ops {
75708 const u8 *tlvs, u32 sizeoftlvs);
75709 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
75710 const u8 *tlvs, u32 sizeoftlvs);
75711-};
75712+} __no_const;
75713
75714 /*
75715 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
75716diff --git a/net/atm/mpc.h b/net/atm/mpc.h
75717index 0919a88..a23d54e 100644
75718--- a/net/atm/mpc.h
75719+++ b/net/atm/mpc.h
75720@@ -33,7 +33,7 @@ struct mpoa_client {
75721 struct mpc_parameters parameters; /* parameters for this client */
75722
75723 const struct net_device_ops *old_ops;
75724- struct net_device_ops new_ops;
75725+ net_device_ops_no_const new_ops;
75726 };
75727
75728
75729diff --git a/net/atm/proc.c b/net/atm/proc.c
75730index 0d020de..011c7bb 100644
75731--- a/net/atm/proc.c
75732+++ b/net/atm/proc.c
75733@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
75734 const struct k_atm_aal_stats *stats)
75735 {
75736 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
75737- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
75738- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
75739- atomic_read(&stats->rx_drop));
75740+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
75741+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
75742+ atomic_read_unchecked(&stats->rx_drop));
75743 }
75744
75745 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
75746diff --git a/net/atm/pvc.c b/net/atm/pvc.c
75747index 3a73491..ae03240 100644
75748--- a/net/atm/pvc.c
75749+++ b/net/atm/pvc.c
75750@@ -95,6 +95,7 @@ static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
75751 return -ENOTCONN;
75752 *sockaddr_len = sizeof(struct sockaddr_atmpvc);
75753 addr = (struct sockaddr_atmpvc *)sockaddr;
75754+ memset(addr, 0, sizeof(*addr));
75755 addr->sap_family = AF_ATMPVC;
75756 addr->sap_addr.itf = vcc->dev->number;
75757 addr->sap_addr.vpi = vcc->vpi;
75758diff --git a/net/atm/resources.c b/net/atm/resources.c
75759index 23f45ce..c748f1a 100644
75760--- a/net/atm/resources.c
75761+++ b/net/atm/resources.c
75762@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
75763 static void copy_aal_stats(struct k_atm_aal_stats *from,
75764 struct atm_aal_stats *to)
75765 {
75766-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
75767+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
75768 __AAL_STAT_ITEMS
75769 #undef __HANDLE_ITEM
75770 }
75771@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
75772 static void subtract_aal_stats(struct k_atm_aal_stats *from,
75773 struct atm_aal_stats *to)
75774 {
75775-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
75776+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
75777 __AAL_STAT_ITEMS
75778 #undef __HANDLE_ITEM
75779 }
75780diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
75781index dc53798..dc66333 100644
75782--- a/net/batman-adv/bat_iv_ogm.c
75783+++ b/net/batman-adv/bat_iv_ogm.c
75784@@ -63,7 +63,7 @@ static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
75785
75786 /* randomize initial seqno to avoid collision */
75787 get_random_bytes(&random_seqno, sizeof(random_seqno));
75788- atomic_set(&hard_iface->seqno, random_seqno);
75789+ atomic_set_unchecked(&hard_iface->seqno, random_seqno);
75790
75791 hard_iface->packet_len = BATMAN_OGM_HLEN;
75792 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
75793@@ -572,7 +572,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
75794
75795 /* change sequence number to network order */
75796 batman_ogm_packet->seqno =
75797- htonl((uint32_t)atomic_read(&hard_iface->seqno));
75798+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
75799
75800 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
75801 batman_ogm_packet->tt_crc = htons((uint16_t)
75802@@ -592,7 +592,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
75803 else
75804 batman_ogm_packet->gw_flags = NO_FLAGS;
75805
75806- atomic_inc(&hard_iface->seqno);
75807+ atomic_inc_unchecked(&hard_iface->seqno);
75808
75809 slide_own_bcast_window(hard_iface);
75810 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
75811@@ -956,7 +956,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
75812 return;
75813
75814 /* could be changed by schedule_own_packet() */
75815- if_incoming_seqno = atomic_read(&if_incoming->seqno);
75816+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
75817
75818 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
75819
75820diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
75821index dc334fa..766a01a 100644
75822--- a/net/batman-adv/hard-interface.c
75823+++ b/net/batman-adv/hard-interface.c
75824@@ -321,7 +321,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
75825 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
75826 dev_add_pack(&hard_iface->batman_adv_ptype);
75827
75828- atomic_set(&hard_iface->frag_seqno, 1);
75829+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
75830 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
75831 hard_iface->net_dev->name);
75832
75833@@ -444,7 +444,7 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
75834 * This can't be called via a bat_priv callback because
75835 * we have no bat_priv yet.
75836 */
75837- atomic_set(&hard_iface->seqno, 1);
75838+ atomic_set_unchecked(&hard_iface->seqno, 1);
75839 hard_iface->packet_buff = NULL;
75840
75841 return hard_iface;
75842diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
75843index a0ec0e4..7beb587 100644
75844--- a/net/batman-adv/soft-interface.c
75845+++ b/net/batman-adv/soft-interface.c
75846@@ -214,7 +214,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
75847
75848 /* set broadcast sequence number */
75849 bcast_packet->seqno =
75850- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
75851+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
75852
75853 add_bcast_packet_to_list(bat_priv, skb, 1);
75854
75855@@ -390,7 +390,7 @@ struct net_device *softif_create(const char *name)
75856 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
75857
75858 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
75859- atomic_set(&bat_priv->bcast_seqno, 1);
75860+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
75861 atomic_set(&bat_priv->ttvn, 0);
75862 atomic_set(&bat_priv->tt_local_changes, 0);
75863 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
75864diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
75865index 61308e8..2e142b2 100644
75866--- a/net/batman-adv/types.h
75867+++ b/net/batman-adv/types.h
75868@@ -38,8 +38,8 @@ struct hard_iface {
75869 int16_t if_num;
75870 char if_status;
75871 struct net_device *net_dev;
75872- atomic_t seqno;
75873- atomic_t frag_seqno;
75874+ atomic_unchecked_t seqno;
75875+ atomic_unchecked_t frag_seqno;
75876 unsigned char *packet_buff;
75877 int packet_len;
75878 struct kobject *hardif_obj;
75879@@ -163,7 +163,7 @@ struct bat_priv {
75880 atomic_t orig_interval; /* uint */
75881 atomic_t hop_penalty; /* uint */
75882 atomic_t log_level; /* uint */
75883- atomic_t bcast_seqno;
75884+ atomic_unchecked_t bcast_seqno;
75885 atomic_t bcast_queue_left;
75886 atomic_t batman_queue_left;
75887 atomic_t ttvn; /* translation table version number */
75888diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
75889index 74175c2..32f8901 100644
75890--- a/net/batman-adv/unicast.c
75891+++ b/net/batman-adv/unicast.c
75892@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
75893 frag1->flags = UNI_FRAG_HEAD | large_tail;
75894 frag2->flags = large_tail;
75895
75896- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
75897+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
75898 frag1->seqno = htons(seqno - 1);
75899 frag2->seqno = htons(seqno);
75900
75901diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
75902index 5914623..bedc768 100644
75903--- a/net/bluetooth/hci_sock.c
75904+++ b/net/bluetooth/hci_sock.c
75905@@ -706,6 +706,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
75906 *addr_len = sizeof(*haddr);
75907 haddr->hci_family = AF_BLUETOOTH;
75908 haddr->hci_dev = hdev->id;
75909+ haddr->hci_channel= 0;
75910
75911 release_sock(sk);
75912 return 0;
75913@@ -1016,6 +1017,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char
75914 {
75915 struct hci_filter *f = &hci_pi(sk)->filter;
75916
75917+ memset(&uf, 0, sizeof(uf));
75918 uf.type_mask = f->type_mask;
75919 uf.opcode = f->opcode;
75920 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
75921diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
75922index 7568a6f..ea3097b 100644
75923--- a/net/bluetooth/l2cap_core.c
75924+++ b/net/bluetooth/l2cap_core.c
75925@@ -2799,8 +2799,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
75926 break;
75927
75928 case L2CAP_CONF_RFC:
75929- if (olen == sizeof(rfc))
75930- memcpy(&rfc, (void *)val, olen);
75931+ if (olen != sizeof(rfc))
75932+ break;
75933+
75934+ memcpy(&rfc, (void *)val, olen);
75935
75936 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
75937 rfc.mode != chan->mode)
75938diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
75939index 3bb1611..fcf656b 100644
75940--- a/net/bluetooth/l2cap_sock.c
75941+++ b/net/bluetooth/l2cap_sock.c
75942@@ -246,6 +246,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
75943
75944 BT_DBG("sock %p, sk %p", sock, sk);
75945
75946+ memset(la, 0, sizeof(struct sockaddr_l2));
75947 addr->sa_family = AF_BLUETOOTH;
75948 *len = sizeof(struct sockaddr_l2);
75949
75950diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
75951index e8707de..2df6956 100644
75952--- a/net/bluetooth/rfcomm/sock.c
75953+++ b/net/bluetooth/rfcomm/sock.c
75954@@ -547,6 +547,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
75955
75956 BT_DBG("sock %p, sk %p", sock, sk);
75957
75958+ memset(sa, 0, sizeof(*sa));
75959 sa->rc_family = AF_BLUETOOTH;
75960 sa->rc_channel = rfcomm_pi(sk)->channel;
75961 if (peer)
75962@@ -841,6 +842,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
75963 }
75964
75965 sec.level = rfcomm_pi(sk)->sec_level;
75966+ sec.key_size = 0;
75967
75968 len = min_t(unsigned int, len, sizeof(sec));
75969 if (copy_to_user(optval, (char *) &sec, len))
75970diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
75971index d1820ff..4f8c8f6 100644
75972--- a/net/bluetooth/rfcomm/tty.c
75973+++ b/net/bluetooth/rfcomm/tty.c
75974@@ -314,7 +314,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
75975 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
75976
75977 spin_lock_irqsave(&dev->port.lock, flags);
75978- if (dev->port.count > 0) {
75979+ if (atomic_read(&dev->port.count) > 0) {
75980 spin_unlock_irqrestore(&dev->port.lock, flags);
75981 return;
75982 }
75983@@ -461,7 +461,7 @@ static int rfcomm_get_dev_list(void __user *arg)
75984
75985 size = sizeof(*dl) + dev_num * sizeof(*di);
75986
75987- dl = kmalloc(size, GFP_KERNEL);
75988+ dl = kzalloc(size, GFP_KERNEL);
75989 if (!dl)
75990 return -ENOMEM;
75991
75992@@ -669,10 +669,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
75993 return -ENODEV;
75994
75995 BT_DBG("dev %p dst %s channel %d opened %d", dev, batostr(&dev->dst),
75996- dev->channel, dev->port.count);
75997+ dev->channel, atomic_read(&dev->port.count));
75998
75999 spin_lock_irqsave(&dev->port.lock, flags);
76000- if (++dev->port.count > 1) {
76001+ if (atomic_inc_return(&dev->port.count) > 1) {
76002 spin_unlock_irqrestore(&dev->port.lock, flags);
76003 return 0;
76004 }
76005@@ -737,10 +737,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
76006 return;
76007
76008 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
76009- dev->port.count);
76010+ atomic_read(&dev->port.count));
76011
76012 spin_lock_irqsave(&dev->port.lock, flags);
76013- if (!--dev->port.count) {
76014+ if (!atomic_dec_return(&dev->port.count)) {
76015 spin_unlock_irqrestore(&dev->port.lock, flags);
76016 if (dev->tty_dev->parent)
76017 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
76018diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
76019index 5fe2ff3..10968b5 100644
76020--- a/net/bridge/netfilter/ebtables.c
76021+++ b/net/bridge/netfilter/ebtables.c
76022@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
76023 tmp.valid_hooks = t->table->valid_hooks;
76024 }
76025 mutex_unlock(&ebt_mutex);
76026- if (copy_to_user(user, &tmp, *len) != 0){
76027+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
76028 BUGPRINT("c2u Didn't work\n");
76029 ret = -EFAULT;
76030 break;
76031diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
76032index 047cd0e..461fd28 100644
76033--- a/net/caif/cfctrl.c
76034+++ b/net/caif/cfctrl.c
76035@@ -10,6 +10,7 @@
76036 #include <linux/spinlock.h>
76037 #include <linux/slab.h>
76038 #include <linux/pkt_sched.h>
76039+#include <linux/sched.h>
76040 #include <net/caif/caif_layer.h>
76041 #include <net/caif/cfpkt.h>
76042 #include <net/caif/cfctrl.h>
76043@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
76044 memset(&dev_info, 0, sizeof(dev_info));
76045 dev_info.id = 0xff;
76046 cfsrvl_init(&this->serv, 0, &dev_info, false);
76047- atomic_set(&this->req_seq_no, 1);
76048- atomic_set(&this->rsp_seq_no, 1);
76049+ atomic_set_unchecked(&this->req_seq_no, 1);
76050+ atomic_set_unchecked(&this->rsp_seq_no, 1);
76051 this->serv.layer.receive = cfctrl_recv;
76052 sprintf(this->serv.layer.name, "ctrl");
76053 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
76054@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
76055 struct cfctrl_request_info *req)
76056 {
76057 spin_lock_bh(&ctrl->info_list_lock);
76058- atomic_inc(&ctrl->req_seq_no);
76059- req->sequence_no = atomic_read(&ctrl->req_seq_no);
76060+ atomic_inc_unchecked(&ctrl->req_seq_no);
76061+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
76062 list_add_tail(&req->list, &ctrl->list);
76063 spin_unlock_bh(&ctrl->info_list_lock);
76064 }
76065@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
76066 if (p != first)
76067 pr_warn("Requests are not received in order\n");
76068
76069- atomic_set(&ctrl->rsp_seq_no,
76070+ atomic_set_unchecked(&ctrl->rsp_seq_no,
76071 p->sequence_no);
76072 list_del(&p->list);
76073 goto out;
76074diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
76075index 69771c0..e597733 100644
76076--- a/net/caif/chnl_net.c
76077+++ b/net/caif/chnl_net.c
76078@@ -94,6 +94,10 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
76079
76080 /* check the version of IP */
76081 ip_version = skb_header_pointer(skb, 0, 1, &buf);
76082+ if (!ip_version) {
76083+ kfree_skb(skb);
76084+ return -EINVAL;
76085+ }
76086
76087 switch (*ip_version >> 4) {
76088 case 4:
76089diff --git a/net/can/gw.c b/net/can/gw.c
76090index b41acf2..3affb3a 100644
76091--- a/net/can/gw.c
76092+++ b/net/can/gw.c
76093@@ -96,7 +96,7 @@ struct cf_mod {
76094 struct {
76095 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
76096 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
76097- } csumfunc;
76098+ } __no_const csumfunc;
76099 };
76100
76101
76102diff --git a/net/compat.c b/net/compat.c
76103index 74ed1d7..62f7ea6 100644
76104--- a/net/compat.c
76105+++ b/net/compat.c
76106@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
76107 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
76108 __get_user(kmsg->msg_flags, &umsg->msg_flags))
76109 return -EFAULT;
76110- kmsg->msg_name = compat_ptr(tmp1);
76111- kmsg->msg_iov = compat_ptr(tmp2);
76112- kmsg->msg_control = compat_ptr(tmp3);
76113+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
76114+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
76115+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
76116 return 0;
76117 }
76118
76119@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
76120
76121 if (kern_msg->msg_namelen) {
76122 if (mode == VERIFY_READ) {
76123- int err = move_addr_to_kernel(kern_msg->msg_name,
76124+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
76125 kern_msg->msg_namelen,
76126 kern_address);
76127 if (err < 0)
76128@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
76129 kern_msg->msg_name = NULL;
76130
76131 tot_len = iov_from_user_compat_to_kern(kern_iov,
76132- (struct compat_iovec __user *)kern_msg->msg_iov,
76133+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
76134 kern_msg->msg_iovlen);
76135 if (tot_len >= 0)
76136 kern_msg->msg_iov = kern_iov;
76137@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
76138
76139 #define CMSG_COMPAT_FIRSTHDR(msg) \
76140 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
76141- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
76142+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
76143 (struct compat_cmsghdr __user *)NULL)
76144
76145 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
76146 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
76147 (ucmlen) <= (unsigned long) \
76148 ((mhdr)->msg_controllen - \
76149- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
76150+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
76151
76152 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
76153 struct compat_cmsghdr __user *cmsg, int cmsg_len)
76154 {
76155 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
76156- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
76157+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
76158 msg->msg_controllen)
76159 return NULL;
76160 return (struct compat_cmsghdr __user *)ptr;
76161@@ -219,7 +219,7 @@ Efault:
76162
76163 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
76164 {
76165- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
76166+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
76167 struct compat_cmsghdr cmhdr;
76168 struct compat_timeval ctv;
76169 struct compat_timespec cts[3];
76170@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
76171
76172 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
76173 {
76174- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
76175+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
76176 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
76177 int fdnum = scm->fp->count;
76178 struct file **fp = scm->fp->fp;
76179@@ -364,7 +364,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
76180 return -EFAULT;
76181 old_fs = get_fs();
76182 set_fs(KERNEL_DS);
76183- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
76184+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
76185 set_fs(old_fs);
76186
76187 return err;
76188@@ -425,7 +425,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
76189 len = sizeof(ktime);
76190 old_fs = get_fs();
76191 set_fs(KERNEL_DS);
76192- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
76193+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
76194 set_fs(old_fs);
76195
76196 if (!err) {
76197@@ -568,7 +568,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
76198 case MCAST_JOIN_GROUP:
76199 case MCAST_LEAVE_GROUP:
76200 {
76201- struct compat_group_req __user *gr32 = (void *)optval;
76202+ struct compat_group_req __user *gr32 = (void __user *)optval;
76203 struct group_req __user *kgr =
76204 compat_alloc_user_space(sizeof(struct group_req));
76205 u32 interface;
76206@@ -589,7 +589,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
76207 case MCAST_BLOCK_SOURCE:
76208 case MCAST_UNBLOCK_SOURCE:
76209 {
76210- struct compat_group_source_req __user *gsr32 = (void *)optval;
76211+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
76212 struct group_source_req __user *kgsr = compat_alloc_user_space(
76213 sizeof(struct group_source_req));
76214 u32 interface;
76215@@ -610,7 +610,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
76216 }
76217 case MCAST_MSFILTER:
76218 {
76219- struct compat_group_filter __user *gf32 = (void *)optval;
76220+ struct compat_group_filter __user *gf32 = (void __user *)optval;
76221 struct group_filter __user *kgf;
76222 u32 interface, fmode, numsrc;
76223
76224@@ -648,7 +648,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
76225 char __user *optval, int __user *optlen,
76226 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
76227 {
76228- struct compat_group_filter __user *gf32 = (void *)optval;
76229+ struct compat_group_filter __user *gf32 = (void __user *)optval;
76230 struct group_filter __user *kgf;
76231 int __user *koptlen;
76232 u32 interface, fmode, numsrc;
76233diff --git a/net/core/datagram.c b/net/core/datagram.c
76234index ae6acf6..d5c8f66 100644
76235--- a/net/core/datagram.c
76236+++ b/net/core/datagram.c
76237@@ -290,7 +290,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
76238 }
76239
76240 kfree_skb(skb);
76241- atomic_inc(&sk->sk_drops);
76242+ atomic_inc_unchecked(&sk->sk_drops);
76243 sk_mem_reclaim_partial(sk);
76244
76245 return err;
76246diff --git a/net/core/dev.c b/net/core/dev.c
76247index a000840..566cee1 100644
76248--- a/net/core/dev.c
76249+++ b/net/core/dev.c
76250@@ -1135,9 +1135,13 @@ void dev_load(struct net *net, const char *name)
76251 if (no_module && capable(CAP_NET_ADMIN))
76252 no_module = request_module("netdev-%s", name);
76253 if (no_module && capable(CAP_SYS_MODULE)) {
76254+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76255+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
76256+#else
76257 if (!request_module("%s", name))
76258 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
76259 name);
76260+#endif
76261 }
76262 }
76263 EXPORT_SYMBOL(dev_load);
76264@@ -1602,7 +1606,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
76265 {
76266 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
76267 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
76268- atomic_long_inc(&dev->rx_dropped);
76269+ atomic_long_inc_unchecked(&dev->rx_dropped);
76270 kfree_skb(skb);
76271 return NET_RX_DROP;
76272 }
76273@@ -1612,7 +1616,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
76274 nf_reset(skb);
76275
76276 if (unlikely(!is_skb_forwardable(dev, skb))) {
76277- atomic_long_inc(&dev->rx_dropped);
76278+ atomic_long_inc_unchecked(&dev->rx_dropped);
76279 kfree_skb(skb);
76280 return NET_RX_DROP;
76281 }
76282@@ -2041,7 +2045,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
76283
76284 struct dev_gso_cb {
76285 void (*destructor)(struct sk_buff *skb);
76286-};
76287+} __no_const;
76288
76289 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
76290
76291@@ -2876,7 +2880,7 @@ enqueue:
76292
76293 local_irq_restore(flags);
76294
76295- atomic_long_inc(&skb->dev->rx_dropped);
76296+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
76297 kfree_skb(skb);
76298 return NET_RX_DROP;
76299 }
76300@@ -2948,7 +2952,7 @@ int netif_rx_ni(struct sk_buff *skb)
76301 }
76302 EXPORT_SYMBOL(netif_rx_ni);
76303
76304-static void net_tx_action(struct softirq_action *h)
76305+static void net_tx_action(void)
76306 {
76307 struct softnet_data *sd = &__get_cpu_var(softnet_data);
76308
76309@@ -3235,7 +3239,7 @@ ncls:
76310 if (pt_prev) {
76311 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
76312 } else {
76313- atomic_long_inc(&skb->dev->rx_dropped);
76314+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
76315 kfree_skb(skb);
76316 /* Jamal, now you will not able to escape explaining
76317 * me how you were going to use this. :-)
76318@@ -3800,7 +3804,7 @@ void netif_napi_del(struct napi_struct *napi)
76319 }
76320 EXPORT_SYMBOL(netif_napi_del);
76321
76322-static void net_rx_action(struct softirq_action *h)
76323+static void net_rx_action(void)
76324 {
76325 struct softnet_data *sd = &__get_cpu_var(softnet_data);
76326 unsigned long time_limit = jiffies + 2;
76327@@ -4270,8 +4274,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
76328 else
76329 seq_printf(seq, "%04x", ntohs(pt->type));
76330
76331+#ifdef CONFIG_GRKERNSEC_HIDESYM
76332+ seq_printf(seq, " %-8s %p\n",
76333+ pt->dev ? pt->dev->name : "", NULL);
76334+#else
76335 seq_printf(seq, " %-8s %pF\n",
76336 pt->dev ? pt->dev->name : "", pt->func);
76337+#endif
76338 }
76339
76340 return 0;
76341@@ -5823,7 +5832,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
76342 } else {
76343 netdev_stats_to_stats64(storage, &dev->stats);
76344 }
76345- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
76346+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
76347 return storage;
76348 }
76349 EXPORT_SYMBOL(dev_get_stats);
76350diff --git a/net/core/flow.c b/net/core/flow.c
76351index e318c7e..168b1d0 100644
76352--- a/net/core/flow.c
76353+++ b/net/core/flow.c
76354@@ -61,7 +61,7 @@ struct flow_cache {
76355 struct timer_list rnd_timer;
76356 };
76357
76358-atomic_t flow_cache_genid = ATOMIC_INIT(0);
76359+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
76360 EXPORT_SYMBOL(flow_cache_genid);
76361 static struct flow_cache flow_cache_global;
76362 static struct kmem_cache *flow_cachep __read_mostly;
76363@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
76364
76365 static int flow_entry_valid(struct flow_cache_entry *fle)
76366 {
76367- if (atomic_read(&flow_cache_genid) != fle->genid)
76368+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
76369 return 0;
76370 if (fle->object && !fle->object->ops->check(fle->object))
76371 return 0;
76372@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
76373 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
76374 fcp->hash_count++;
76375 }
76376- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
76377+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
76378 flo = fle->object;
76379 if (!flo)
76380 goto ret_object;
76381@@ -280,7 +280,7 @@ nocache:
76382 }
76383 flo = resolver(net, key, family, dir, flo, ctx);
76384 if (fle) {
76385- fle->genid = atomic_read(&flow_cache_genid);
76386+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
76387 if (!IS_ERR(flo))
76388 fle->object = flo;
76389 else
76390diff --git a/net/core/iovec.c b/net/core/iovec.c
76391index 7e7aeb0..2a998cb 100644
76392--- a/net/core/iovec.c
76393+++ b/net/core/iovec.c
76394@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
76395 if (m->msg_namelen) {
76396 if (mode == VERIFY_READ) {
76397 void __user *namep;
76398- namep = (void __user __force *) m->msg_name;
76399+ namep = (void __force_user *) m->msg_name;
76400 err = move_addr_to_kernel(namep, m->msg_namelen,
76401 address);
76402 if (err < 0)
76403@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
76404 }
76405
76406 size = m->msg_iovlen * sizeof(struct iovec);
76407- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
76408+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
76409 return -EFAULT;
76410
76411 m->msg_iov = iov;
76412diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
76413index 6c50ac0..6b4c038 100644
76414--- a/net/core/rtnetlink.c
76415+++ b/net/core/rtnetlink.c
76416@@ -58,7 +58,7 @@ struct rtnl_link {
76417 rtnl_doit_func doit;
76418 rtnl_dumpit_func dumpit;
76419 rtnl_calcit_func calcit;
76420-};
76421+} __no_const;
76422
76423 static DEFINE_MUTEX(rtnl_mutex);
76424
76425diff --git a/net/core/scm.c b/net/core/scm.c
76426index 611c5ef..88f6d6d 100644
76427--- a/net/core/scm.c
76428+++ b/net/core/scm.c
76429@@ -219,7 +219,7 @@ EXPORT_SYMBOL(__scm_send);
76430 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
76431 {
76432 struct cmsghdr __user *cm
76433- = (__force struct cmsghdr __user *)msg->msg_control;
76434+ = (struct cmsghdr __force_user *)msg->msg_control;
76435 struct cmsghdr cmhdr;
76436 int cmlen = CMSG_LEN(len);
76437 int err;
76438@@ -242,7 +242,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
76439 err = -EFAULT;
76440 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
76441 goto out;
76442- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
76443+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
76444 goto out;
76445 cmlen = CMSG_SPACE(len);
76446 if (msg->msg_controllen < cmlen)
76447@@ -258,7 +258,7 @@ EXPORT_SYMBOL(put_cmsg);
76448 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
76449 {
76450 struct cmsghdr __user *cm
76451- = (__force struct cmsghdr __user*)msg->msg_control;
76452+ = (struct cmsghdr __force_user *)msg->msg_control;
76453
76454 int fdmax = 0;
76455 int fdnum = scm->fp->count;
76456@@ -278,7 +278,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
76457 if (fdnum < fdmax)
76458 fdmax = fdnum;
76459
76460- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
76461+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
76462 i++, cmfptr++)
76463 {
76464 int new_fd;
76465diff --git a/net/core/sock.c b/net/core/sock.c
76466index 9e5b71f..66dec30 100644
76467--- a/net/core/sock.c
76468+++ b/net/core/sock.c
76469@@ -344,7 +344,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76470 struct sk_buff_head *list = &sk->sk_receive_queue;
76471
76472 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
76473- atomic_inc(&sk->sk_drops);
76474+ atomic_inc_unchecked(&sk->sk_drops);
76475 trace_sock_rcvqueue_full(sk, skb);
76476 return -ENOMEM;
76477 }
76478@@ -354,7 +354,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76479 return err;
76480
76481 if (!sk_rmem_schedule(sk, skb->truesize)) {
76482- atomic_inc(&sk->sk_drops);
76483+ atomic_inc_unchecked(&sk->sk_drops);
76484 return -ENOBUFS;
76485 }
76486
76487@@ -374,7 +374,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76488 skb_dst_force(skb);
76489
76490 spin_lock_irqsave(&list->lock, flags);
76491- skb->dropcount = atomic_read(&sk->sk_drops);
76492+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
76493 __skb_queue_tail(list, skb);
76494 spin_unlock_irqrestore(&list->lock, flags);
76495
76496@@ -394,7 +394,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
76497 skb->dev = NULL;
76498
76499 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
76500- atomic_inc(&sk->sk_drops);
76501+ atomic_inc_unchecked(&sk->sk_drops);
76502 goto discard_and_relse;
76503 }
76504 if (nested)
76505@@ -412,7 +412,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
76506 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
76507 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
76508 bh_unlock_sock(sk);
76509- atomic_inc(&sk->sk_drops);
76510+ atomic_inc_unchecked(&sk->sk_drops);
76511 goto discard_and_relse;
76512 }
76513
76514@@ -976,7 +976,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76515 if (len > sizeof(peercred))
76516 len = sizeof(peercred);
76517 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
76518- if (copy_to_user(optval, &peercred, len))
76519+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
76520 return -EFAULT;
76521 goto lenout;
76522 }
76523@@ -989,7 +989,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76524 return -ENOTCONN;
76525 if (lv < len)
76526 return -EINVAL;
76527- if (copy_to_user(optval, address, len))
76528+ if (len > sizeof(address) || copy_to_user(optval, address, len))
76529 return -EFAULT;
76530 goto lenout;
76531 }
76532@@ -1035,7 +1035,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
76533
76534 if (len > lv)
76535 len = lv;
76536- if (copy_to_user(optval, &v, len))
76537+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
76538 return -EFAULT;
76539 lenout:
76540 if (put_user(len, optlen))
76541@@ -2124,7 +2124,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
76542 */
76543 smp_wmb();
76544 atomic_set(&sk->sk_refcnt, 1);
76545- atomic_set(&sk->sk_drops, 0);
76546+ atomic_set_unchecked(&sk->sk_drops, 0);
76547 }
76548 EXPORT_SYMBOL(sock_init_data);
76549
76550diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
76551index 5fd1467..8b70900 100644
76552--- a/net/core/sock_diag.c
76553+++ b/net/core/sock_diag.c
76554@@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
76555
76556 int sock_diag_check_cookie(void *sk, __u32 *cookie)
76557 {
76558+#ifndef CONFIG_GRKERNSEC_HIDESYM
76559 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
76560 cookie[1] != INET_DIAG_NOCOOKIE) &&
76561 ((u32)(unsigned long)sk != cookie[0] ||
76562 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
76563 return -ESTALE;
76564 else
76565+#endif
76566 return 0;
76567 }
76568 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
76569
76570 void sock_diag_save_cookie(void *sk, __u32 *cookie)
76571 {
76572+#ifdef CONFIG_GRKERNSEC_HIDESYM
76573+ cookie[0] = 0;
76574+ cookie[1] = 0;
76575+#else
76576 cookie[0] = (u32)(unsigned long)sk;
76577 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
76578+#endif
76579 }
76580 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
76581
76582diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
76583index 8c67bed..ce0d140 100644
76584--- a/net/dccp/ccids/ccid3.c
76585+++ b/net/dccp/ccids/ccid3.c
76586@@ -531,6 +531,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
76587 case DCCP_SOCKOPT_CCID_TX_INFO:
76588 if (len < sizeof(tfrc))
76589 return -EINVAL;
76590+ memset(&tfrc, 0, sizeof(tfrc));
76591 tfrc.tfrctx_x = hc->tx_x;
76592 tfrc.tfrctx_x_recv = hc->tx_x_recv;
76593 tfrc.tfrctx_x_calc = hc->tx_x_calc;
76594diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
76595index a55eecc..dd8428c 100644
76596--- a/net/decnet/sysctl_net_decnet.c
76597+++ b/net/decnet/sysctl_net_decnet.c
76598@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
76599
76600 if (len > *lenp) len = *lenp;
76601
76602- if (copy_to_user(buffer, addr, len))
76603+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
76604 return -EFAULT;
76605
76606 *lenp = len;
76607@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
76608
76609 if (len > *lenp) len = *lenp;
76610
76611- if (copy_to_user(buffer, devname, len))
76612+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
76613 return -EFAULT;
76614
76615 *lenp = len;
76616diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
76617index 3854411..2201a94 100644
76618--- a/net/ipv4/fib_frontend.c
76619+++ b/net/ipv4/fib_frontend.c
76620@@ -969,12 +969,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
76621 #ifdef CONFIG_IP_ROUTE_MULTIPATH
76622 fib_sync_up(dev);
76623 #endif
76624- atomic_inc(&net->ipv4.dev_addr_genid);
76625+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76626 rt_cache_flush(dev_net(dev), -1);
76627 break;
76628 case NETDEV_DOWN:
76629 fib_del_ifaddr(ifa, NULL);
76630- atomic_inc(&net->ipv4.dev_addr_genid);
76631+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76632 if (ifa->ifa_dev->ifa_list == NULL) {
76633 /* Last address was deleted from this interface.
76634 * Disable IP.
76635@@ -1010,7 +1010,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
76636 #ifdef CONFIG_IP_ROUTE_MULTIPATH
76637 fib_sync_up(dev);
76638 #endif
76639- atomic_inc(&net->ipv4.dev_addr_genid);
76640+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
76641 rt_cache_flush(dev_net(dev), -1);
76642 break;
76643 case NETDEV_DOWN:
76644diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
76645index e5b7182..570a90e 100644
76646--- a/net/ipv4/fib_semantics.c
76647+++ b/net/ipv4/fib_semantics.c
76648@@ -698,7 +698,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
76649 nh->nh_saddr = inet_select_addr(nh->nh_dev,
76650 nh->nh_gw,
76651 nh->nh_parent->fib_scope);
76652- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
76653+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
76654
76655 return nh->nh_saddr;
76656 }
76657diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
76658index 7880af9..70f92a3 100644
76659--- a/net/ipv4/inet_hashtables.c
76660+++ b/net/ipv4/inet_hashtables.c
76661@@ -18,12 +18,15 @@
76662 #include <linux/sched.h>
76663 #include <linux/slab.h>
76664 #include <linux/wait.h>
76665+#include <linux/security.h>
76666
76667 #include <net/inet_connection_sock.h>
76668 #include <net/inet_hashtables.h>
76669 #include <net/secure_seq.h>
76670 #include <net/ip.h>
76671
76672+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
76673+
76674 /*
76675 * Allocate and initialize a new local port bind bucket.
76676 * The bindhash mutex for snum's hash chain must be held here.
76677@@ -530,6 +533,8 @@ ok:
76678 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
76679 spin_unlock(&head->lock);
76680
76681+ gr_update_task_in_ip_table(current, inet_sk(sk));
76682+
76683 if (tw) {
76684 inet_twsk_deschedule(tw, death_row);
76685 while (twrefcnt) {
76686diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
76687index dfba343..c827d50 100644
76688--- a/net/ipv4/inetpeer.c
76689+++ b/net/ipv4/inetpeer.c
76690@@ -487,8 +487,8 @@ relookup:
76691 if (p) {
76692 p->daddr = *daddr;
76693 atomic_set(&p->refcnt, 1);
76694- atomic_set(&p->rid, 0);
76695- atomic_set(&p->ip_id_count,
76696+ atomic_set_unchecked(&p->rid, 0);
76697+ atomic_set_unchecked(&p->ip_id_count,
76698 (daddr->family == AF_INET) ?
76699 secure_ip_id(daddr->addr.a4) :
76700 secure_ipv6_id(daddr->addr.a6));
76701diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
76702index 9dbd3dd..0c59fb2 100644
76703--- a/net/ipv4/ip_fragment.c
76704+++ b/net/ipv4/ip_fragment.c
76705@@ -318,7 +318,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
76706 return 0;
76707
76708 start = qp->rid;
76709- end = atomic_inc_return(&peer->rid);
76710+ end = atomic_inc_return_unchecked(&peer->rid);
76711 qp->rid = end;
76712
76713 rc = qp->q.fragments && (end - start) > max;
76714diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
76715index 0d11f23..2bb3f64 100644
76716--- a/net/ipv4/ip_sockglue.c
76717+++ b/net/ipv4/ip_sockglue.c
76718@@ -1142,7 +1142,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
76719 len = min_t(unsigned int, len, opt->optlen);
76720 if (put_user(len, optlen))
76721 return -EFAULT;
76722- if (copy_to_user(optval, opt->__data, len))
76723+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
76724+ copy_to_user(optval, opt->__data, len))
76725 return -EFAULT;
76726 return 0;
76727 }
76728@@ -1273,7 +1274,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
76729 if (sk->sk_type != SOCK_STREAM)
76730 return -ENOPROTOOPT;
76731
76732- msg.msg_control = optval;
76733+ msg.msg_control = (void __force_kernel *)optval;
76734 msg.msg_controllen = len;
76735 msg.msg_flags = flags;
76736
76737diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
76738index 67e8a6b..386764d 100644
76739--- a/net/ipv4/ipconfig.c
76740+++ b/net/ipv4/ipconfig.c
76741@@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
76742
76743 mm_segment_t oldfs = get_fs();
76744 set_fs(get_ds());
76745- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
76746+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
76747 set_fs(oldfs);
76748 return res;
76749 }
76750@@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
76751
76752 mm_segment_t oldfs = get_fs();
76753 set_fs(get_ds());
76754- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
76755+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
76756 set_fs(oldfs);
76757 return res;
76758 }
76759@@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
76760
76761 mm_segment_t oldfs = get_fs();
76762 set_fs(get_ds());
76763- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
76764+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
76765 set_fs(oldfs);
76766 return res;
76767 }
76768diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
76769index 2c00e8b..45b3bdd 100644
76770--- a/net/ipv4/ping.c
76771+++ b/net/ipv4/ping.c
76772@@ -845,7 +845,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
76773 sk_rmem_alloc_get(sp),
76774 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76775 atomic_read(&sp->sk_refcnt), sp,
76776- atomic_read(&sp->sk_drops), len);
76777+ atomic_read_unchecked(&sp->sk_drops), len);
76778 }
76779
76780 static int ping_seq_show(struct seq_file *seq, void *v)
76781diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
76782index 4032b81..625143c 100644
76783--- a/net/ipv4/raw.c
76784+++ b/net/ipv4/raw.c
76785@@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
76786 int raw_rcv(struct sock *sk, struct sk_buff *skb)
76787 {
76788 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
76789- atomic_inc(&sk->sk_drops);
76790+ atomic_inc_unchecked(&sk->sk_drops);
76791 kfree_skb(skb);
76792 return NET_RX_DROP;
76793 }
76794@@ -740,16 +740,20 @@ static int raw_init(struct sock *sk)
76795
76796 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
76797 {
76798+ struct icmp_filter filter;
76799+
76800 if (optlen > sizeof(struct icmp_filter))
76801 optlen = sizeof(struct icmp_filter);
76802- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
76803+ if (copy_from_user(&filter, optval, optlen))
76804 return -EFAULT;
76805+ raw_sk(sk)->filter = filter;
76806 return 0;
76807 }
76808
76809 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
76810 {
76811 int len, ret = -EFAULT;
76812+ struct icmp_filter filter;
76813
76814 if (get_user(len, optlen))
76815 goto out;
76816@@ -759,8 +763,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
76817 if (len > sizeof(struct icmp_filter))
76818 len = sizeof(struct icmp_filter);
76819 ret = -EFAULT;
76820- if (put_user(len, optlen) ||
76821- copy_to_user(optval, &raw_sk(sk)->filter, len))
76822+ filter = raw_sk(sk)->filter;
76823+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
76824 goto out;
76825 ret = 0;
76826 out: return ret;
76827@@ -988,7 +992,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
76828 sk_wmem_alloc_get(sp),
76829 sk_rmem_alloc_get(sp),
76830 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76831- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
76832+ atomic_read(&sp->sk_refcnt),
76833+#ifdef CONFIG_GRKERNSEC_HIDESYM
76834+ NULL,
76835+#else
76836+ sp,
76837+#endif
76838+ atomic_read_unchecked(&sp->sk_drops));
76839 }
76840
76841 static int raw_seq_show(struct seq_file *seq, void *v)
76842diff --git a/net/ipv4/route.c b/net/ipv4/route.c
76843index 98b30d0..cfa3cf7 100644
76844--- a/net/ipv4/route.c
76845+++ b/net/ipv4/route.c
76846@@ -313,7 +313,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
76847
76848 static inline int rt_genid(struct net *net)
76849 {
76850- return atomic_read(&net->ipv4.rt_genid);
76851+ return atomic_read_unchecked(&net->ipv4.rt_genid);
76852 }
76853
76854 #ifdef CONFIG_PROC_FS
76855@@ -937,7 +937,7 @@ static void rt_cache_invalidate(struct net *net)
76856 unsigned char shuffle;
76857
76858 get_random_bytes(&shuffle, sizeof(shuffle));
76859- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
76860+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
76861 inetpeer_invalidate_tree(AF_INET);
76862 }
76863
76864@@ -3011,7 +3011,7 @@ static int rt_fill_info(struct net *net,
76865 error = rt->dst.error;
76866 if (peer) {
76867 inet_peer_refcheck(rt->peer);
76868- id = atomic_read(&peer->ip_id_count) & 0xffff;
76869+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
76870 if (peer->tcp_ts_stamp) {
76871 ts = peer->tcp_ts;
76872 tsage = get_seconds() - peer->tcp_ts_stamp;
76873diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
76874index 05fe1f4..cc273dd 100644
76875--- a/net/ipv4/tcp_input.c
76876+++ b/net/ipv4/tcp_input.c
76877@@ -4886,7 +4886,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
76878 * simplifies code)
76879 */
76880 static void
76881-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
76882+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
76883 struct sk_buff *head, struct sk_buff *tail,
76884 u32 start, u32 end)
76885 {
76886diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
76887index c8d28c4..e40f75a 100644
76888--- a/net/ipv4/tcp_ipv4.c
76889+++ b/net/ipv4/tcp_ipv4.c
76890@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
76891 EXPORT_SYMBOL(sysctl_tcp_low_latency);
76892
76893
76894+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76895+extern int grsec_enable_blackhole;
76896+#endif
76897+
76898 #ifdef CONFIG_TCP_MD5SIG
76899 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
76900 __be32 daddr, __be32 saddr, const struct tcphdr *th);
76901@@ -1656,6 +1660,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
76902 return 0;
76903
76904 reset:
76905+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76906+ if (!grsec_enable_blackhole)
76907+#endif
76908 tcp_v4_send_reset(rsk, skb);
76909 discard:
76910 kfree_skb(skb);
76911@@ -1718,12 +1725,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
76912 TCP_SKB_CB(skb)->sacked = 0;
76913
76914 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76915- if (!sk)
76916+ if (!sk) {
76917+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76918+ ret = 1;
76919+#endif
76920 goto no_tcp_socket;
76921-
76922+ }
76923 process:
76924- if (sk->sk_state == TCP_TIME_WAIT)
76925+ if (sk->sk_state == TCP_TIME_WAIT) {
76926+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76927+ ret = 2;
76928+#endif
76929 goto do_time_wait;
76930+ }
76931
76932 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
76933 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
76934@@ -1774,6 +1788,10 @@ no_tcp_socket:
76935 bad_packet:
76936 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76937 } else {
76938+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76939+ if (!grsec_enable_blackhole || (ret == 1 &&
76940+ (skb->dev->flags & IFF_LOOPBACK)))
76941+#endif
76942 tcp_v4_send_reset(NULL, skb);
76943 }
76944
76945@@ -2386,7 +2404,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
76946 0, /* non standard timer */
76947 0, /* open_requests have no inode */
76948 atomic_read(&sk->sk_refcnt),
76949+#ifdef CONFIG_GRKERNSEC_HIDESYM
76950+ NULL,
76951+#else
76952 req,
76953+#endif
76954 len);
76955 }
76956
76957@@ -2436,7 +2458,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
76958 sock_i_uid(sk),
76959 icsk->icsk_probes_out,
76960 sock_i_ino(sk),
76961- atomic_read(&sk->sk_refcnt), sk,
76962+ atomic_read(&sk->sk_refcnt),
76963+#ifdef CONFIG_GRKERNSEC_HIDESYM
76964+ NULL,
76965+#else
76966+ sk,
76967+#endif
76968 jiffies_to_clock_t(icsk->icsk_rto),
76969 jiffies_to_clock_t(icsk->icsk_ack.ato),
76970 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
76971@@ -2464,7 +2491,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
76972 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
76973 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
76974 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76975- atomic_read(&tw->tw_refcnt), tw, len);
76976+ atomic_read(&tw->tw_refcnt),
76977+#ifdef CONFIG_GRKERNSEC_HIDESYM
76978+ NULL,
76979+#else
76980+ tw,
76981+#endif
76982+ len);
76983 }
76984
76985 #define TMPSZ 150
76986diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
76987index b85d9fe..4b0eed9 100644
76988--- a/net/ipv4/tcp_minisocks.c
76989+++ b/net/ipv4/tcp_minisocks.c
76990@@ -27,6 +27,10 @@
76991 #include <net/inet_common.h>
76992 #include <net/xfrm.h>
76993
76994+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76995+extern int grsec_enable_blackhole;
76996+#endif
76997+
76998 int sysctl_tcp_syncookies __read_mostly = 1;
76999 EXPORT_SYMBOL(sysctl_tcp_syncookies);
77000
77001@@ -754,6 +758,10 @@ listen_overflow:
77002
77003 embryonic_reset:
77004 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
77005+
77006+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77007+ if (!grsec_enable_blackhole)
77008+#endif
77009 if (!(flg & TCP_FLAG_RST))
77010 req->rsk_ops->send_reset(sk, skb);
77011
77012diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
77013index 4526fe6..1a34e43 100644
77014--- a/net/ipv4/tcp_probe.c
77015+++ b/net/ipv4/tcp_probe.c
77016@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
77017 if (cnt + width >= len)
77018 break;
77019
77020- if (copy_to_user(buf + cnt, tbuf, width))
77021+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
77022 return -EFAULT;
77023 cnt += width;
77024 }
77025diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
77026index e911e6c..d0a9356 100644
77027--- a/net/ipv4/tcp_timer.c
77028+++ b/net/ipv4/tcp_timer.c
77029@@ -22,6 +22,10 @@
77030 #include <linux/gfp.h>
77031 #include <net/tcp.h>
77032
77033+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77034+extern int grsec_lastack_retries;
77035+#endif
77036+
77037 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
77038 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
77039 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
77040@@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
77041 }
77042 }
77043
77044+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77045+ if ((sk->sk_state == TCP_LAST_ACK) &&
77046+ (grsec_lastack_retries > 0) &&
77047+ (grsec_lastack_retries < retry_until))
77048+ retry_until = grsec_lastack_retries;
77049+#endif
77050+
77051 if (retransmits_timed_out(sk, retry_until,
77052 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
77053 /* Has it gone just too far? */
77054diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
77055index eaca736..60488ae 100644
77056--- a/net/ipv4/udp.c
77057+++ b/net/ipv4/udp.c
77058@@ -87,6 +87,7 @@
77059 #include <linux/types.h>
77060 #include <linux/fcntl.h>
77061 #include <linux/module.h>
77062+#include <linux/security.h>
77063 #include <linux/socket.h>
77064 #include <linux/sockios.h>
77065 #include <linux/igmp.h>
77066@@ -110,6 +111,10 @@
77067 #include <linux/static_key.h>
77068 #include "udp_impl.h"
77069
77070+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77071+extern int grsec_enable_blackhole;
77072+#endif
77073+
77074 struct udp_table udp_table __read_mostly;
77075 EXPORT_SYMBOL(udp_table);
77076
77077@@ -568,6 +573,9 @@ found:
77078 return s;
77079 }
77080
77081+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
77082+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
77083+
77084 /*
77085 * This routine is called by the ICMP module when it gets some
77086 * sort of error condition. If err < 0 then the socket should
77087@@ -859,9 +867,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
77088 dport = usin->sin_port;
77089 if (dport == 0)
77090 return -EINVAL;
77091+
77092+ err = gr_search_udp_sendmsg(sk, usin);
77093+ if (err)
77094+ return err;
77095 } else {
77096 if (sk->sk_state != TCP_ESTABLISHED)
77097 return -EDESTADDRREQ;
77098+
77099+ err = gr_search_udp_sendmsg(sk, NULL);
77100+ if (err)
77101+ return err;
77102+
77103 daddr = inet->inet_daddr;
77104 dport = inet->inet_dport;
77105 /* Open fast path for connected socket.
77106@@ -1103,7 +1120,7 @@ static unsigned int first_packet_length(struct sock *sk)
77107 udp_lib_checksum_complete(skb)) {
77108 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
77109 IS_UDPLITE(sk));
77110- atomic_inc(&sk->sk_drops);
77111+ atomic_inc_unchecked(&sk->sk_drops);
77112 __skb_unlink(skb, rcvq);
77113 __skb_queue_tail(&list_kill, skb);
77114 }
77115@@ -1189,6 +1206,10 @@ try_again:
77116 if (!skb)
77117 goto out;
77118
77119+ err = gr_search_udp_recvmsg(sk, skb);
77120+ if (err)
77121+ goto out_free;
77122+
77123 ulen = skb->len - sizeof(struct udphdr);
77124 copied = len;
77125 if (copied > ulen)
77126@@ -1498,7 +1519,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
77127
77128 drop:
77129 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
77130- atomic_inc(&sk->sk_drops);
77131+ atomic_inc_unchecked(&sk->sk_drops);
77132 kfree_skb(skb);
77133 return -1;
77134 }
77135@@ -1517,7 +1538,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
77136 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
77137
77138 if (!skb1) {
77139- atomic_inc(&sk->sk_drops);
77140+ atomic_inc_unchecked(&sk->sk_drops);
77141 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
77142 IS_UDPLITE(sk));
77143 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
77144@@ -1686,6 +1707,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
77145 goto csum_error;
77146
77147 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
77148+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77149+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
77150+#endif
77151 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
77152
77153 /*
77154@@ -2104,8 +2128,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
77155 sk_wmem_alloc_get(sp),
77156 sk_rmem_alloc_get(sp),
77157 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
77158- atomic_read(&sp->sk_refcnt), sp,
77159- atomic_read(&sp->sk_drops), len);
77160+ atomic_read(&sp->sk_refcnt),
77161+#ifdef CONFIG_GRKERNSEC_HIDESYM
77162+ NULL,
77163+#else
77164+ sp,
77165+#endif
77166+ atomic_read_unchecked(&sp->sk_drops), len);
77167 }
77168
77169 int udp4_seq_show(struct seq_file *seq, void *v)
77170diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
77171index 8f6411c..5767579 100644
77172--- a/net/ipv6/addrconf.c
77173+++ b/net/ipv6/addrconf.c
77174@@ -2145,7 +2145,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
77175 p.iph.ihl = 5;
77176 p.iph.protocol = IPPROTO_IPV6;
77177 p.iph.ttl = 64;
77178- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
77179+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
77180
77181 if (ops->ndo_do_ioctl) {
77182 mm_segment_t oldfs = get_fs();
77183diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
77184index db1521f..ebb3314 100644
77185--- a/net/ipv6/esp6.c
77186+++ b/net/ipv6/esp6.c
77187@@ -166,8 +166,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
77188 struct esp_data *esp = x->data;
77189
77190 /* skb is pure payload to encrypt */
77191- err = -ENOMEM;
77192-
77193 aead = esp->aead;
77194 alen = crypto_aead_authsize(aead);
77195
77196@@ -202,8 +200,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
77197 }
77198
77199 tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
77200- if (!tmp)
77201+ if (!tmp) {
77202+ err = -ENOMEM;
77203 goto error;
77204+ }
77205
77206 seqhi = esp_tmp_seqhi(tmp);
77207 iv = esp_tmp_iv(aead, tmp, seqhilen);
77208diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
77209index e6cee52..cf47476 100644
77210--- a/net/ipv6/inet6_connection_sock.c
77211+++ b/net/ipv6/inet6_connection_sock.c
77212@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
77213 #ifdef CONFIG_XFRM
77214 {
77215 struct rt6_info *rt = (struct rt6_info *)dst;
77216- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
77217+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
77218 }
77219 #endif
77220 }
77221@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
77222 #ifdef CONFIG_XFRM
77223 if (dst) {
77224 struct rt6_info *rt = (struct rt6_info *)dst;
77225- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
77226+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
77227 __sk_dst_reset(sk);
77228 dst = NULL;
77229 }
77230diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
77231index ba6d13d..6899122 100644
77232--- a/net/ipv6/ipv6_sockglue.c
77233+++ b/net/ipv6/ipv6_sockglue.c
77234@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
77235 if (sk->sk_type != SOCK_STREAM)
77236 return -ENOPROTOOPT;
77237
77238- msg.msg_control = optval;
77239+ msg.msg_control = (void __force_kernel *)optval;
77240 msg.msg_controllen = len;
77241 msg.msg_flags = flags;
77242
77243diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
77244index 93d6983..8e54c4d 100644
77245--- a/net/ipv6/raw.c
77246+++ b/net/ipv6/raw.c
77247@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
77248 {
77249 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
77250 skb_checksum_complete(skb)) {
77251- atomic_inc(&sk->sk_drops);
77252+ atomic_inc_unchecked(&sk->sk_drops);
77253 kfree_skb(skb);
77254 return NET_RX_DROP;
77255 }
77256@@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
77257 struct raw6_sock *rp = raw6_sk(sk);
77258
77259 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
77260- atomic_inc(&sk->sk_drops);
77261+ atomic_inc_unchecked(&sk->sk_drops);
77262 kfree_skb(skb);
77263 return NET_RX_DROP;
77264 }
77265@@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
77266
77267 if (inet->hdrincl) {
77268 if (skb_checksum_complete(skb)) {
77269- atomic_inc(&sk->sk_drops);
77270+ atomic_inc_unchecked(&sk->sk_drops);
77271 kfree_skb(skb);
77272 return NET_RX_DROP;
77273 }
77274@@ -602,7 +602,7 @@ out:
77275 return err;
77276 }
77277
77278-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
77279+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
77280 struct flowi6 *fl6, struct dst_entry **dstp,
77281 unsigned int flags)
77282 {
77283@@ -914,12 +914,15 @@ do_confirm:
77284 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
77285 char __user *optval, int optlen)
77286 {
77287+ struct icmp6_filter filter;
77288+
77289 switch (optname) {
77290 case ICMPV6_FILTER:
77291 if (optlen > sizeof(struct icmp6_filter))
77292 optlen = sizeof(struct icmp6_filter);
77293- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
77294+ if (copy_from_user(&filter, optval, optlen))
77295 return -EFAULT;
77296+ raw6_sk(sk)->filter = filter;
77297 return 0;
77298 default:
77299 return -ENOPROTOOPT;
77300@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
77301 char __user *optval, int __user *optlen)
77302 {
77303 int len;
77304+ struct icmp6_filter filter;
77305
77306 switch (optname) {
77307 case ICMPV6_FILTER:
77308@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
77309 len = sizeof(struct icmp6_filter);
77310 if (put_user(len, optlen))
77311 return -EFAULT;
77312- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
77313+ filter = raw6_sk(sk)->filter;
77314+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
77315 return -EFAULT;
77316 return 0;
77317 default:
77318@@ -1250,7 +1255,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
77319 0, 0L, 0,
77320 sock_i_uid(sp), 0,
77321 sock_i_ino(sp),
77322- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
77323+ atomic_read(&sp->sk_refcnt),
77324+#ifdef CONFIG_GRKERNSEC_HIDESYM
77325+ NULL,
77326+#else
77327+ sp,
77328+#endif
77329+ atomic_read_unchecked(&sp->sk_drops));
77330 }
77331
77332 static int raw6_seq_show(struct seq_file *seq, void *v)
77333diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
77334index 9df64a5..39875da 100644
77335--- a/net/ipv6/tcp_ipv6.c
77336+++ b/net/ipv6/tcp_ipv6.c
77337@@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
77338 }
77339 #endif
77340
77341+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77342+extern int grsec_enable_blackhole;
77343+#endif
77344+
77345 static void tcp_v6_hash(struct sock *sk)
77346 {
77347 if (sk->sk_state != TCP_CLOSE) {
77348@@ -1544,6 +1548,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
77349 return 0;
77350
77351 reset:
77352+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77353+ if (!grsec_enable_blackhole)
77354+#endif
77355 tcp_v6_send_reset(sk, skb);
77356 discard:
77357 if (opt_skb)
77358@@ -1625,12 +1632,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
77359 TCP_SKB_CB(skb)->sacked = 0;
77360
77361 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
77362- if (!sk)
77363+ if (!sk) {
77364+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77365+ ret = 1;
77366+#endif
77367 goto no_tcp_socket;
77368+ }
77369
77370 process:
77371- if (sk->sk_state == TCP_TIME_WAIT)
77372+ if (sk->sk_state == TCP_TIME_WAIT) {
77373+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77374+ ret = 2;
77375+#endif
77376 goto do_time_wait;
77377+ }
77378
77379 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
77380 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
77381@@ -1679,6 +1694,10 @@ no_tcp_socket:
77382 bad_packet:
77383 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
77384 } else {
77385+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77386+ if (!grsec_enable_blackhole || (ret == 1 &&
77387+ (skb->dev->flags & IFF_LOOPBACK)))
77388+#endif
77389 tcp_v6_send_reset(NULL, skb);
77390 }
77391
77392@@ -1885,7 +1904,13 @@ static void get_openreq6(struct seq_file *seq,
77393 uid,
77394 0, /* non standard timer */
77395 0, /* open_requests have no inode */
77396- 0, req);
77397+ 0,
77398+#ifdef CONFIG_GRKERNSEC_HIDESYM
77399+ NULL
77400+#else
77401+ req
77402+#endif
77403+ );
77404 }
77405
77406 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
77407@@ -1935,7 +1960,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
77408 sock_i_uid(sp),
77409 icsk->icsk_probes_out,
77410 sock_i_ino(sp),
77411- atomic_read(&sp->sk_refcnt), sp,
77412+ atomic_read(&sp->sk_refcnt),
77413+#ifdef CONFIG_GRKERNSEC_HIDESYM
77414+ NULL,
77415+#else
77416+ sp,
77417+#endif
77418 jiffies_to_clock_t(icsk->icsk_rto),
77419 jiffies_to_clock_t(icsk->icsk_ack.ato),
77420 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
77421@@ -1970,7 +2000,13 @@ static void get_timewait6_sock(struct seq_file *seq,
77422 dest->s6_addr32[2], dest->s6_addr32[3], destp,
77423 tw->tw_substate, 0, 0,
77424 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
77425- atomic_read(&tw->tw_refcnt), tw);
77426+ atomic_read(&tw->tw_refcnt),
77427+#ifdef CONFIG_GRKERNSEC_HIDESYM
77428+ NULL
77429+#else
77430+ tw
77431+#endif
77432+ );
77433 }
77434
77435 static int tcp6_seq_show(struct seq_file *seq, void *v)
77436diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
77437index f05099f..ea613f0 100644
77438--- a/net/ipv6/udp.c
77439+++ b/net/ipv6/udp.c
77440@@ -50,6 +50,10 @@
77441 #include <linux/seq_file.h>
77442 #include "udp_impl.h"
77443
77444+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77445+extern int grsec_enable_blackhole;
77446+#endif
77447+
77448 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
77449 {
77450 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
77451@@ -615,7 +619,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
77452 return rc;
77453 drop:
77454 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
77455- atomic_inc(&sk->sk_drops);
77456+ atomic_inc_unchecked(&sk->sk_drops);
77457 kfree_skb(skb);
77458 return -1;
77459 }
77460@@ -673,7 +677,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
77461 if (likely(skb1 == NULL))
77462 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
77463 if (!skb1) {
77464- atomic_inc(&sk->sk_drops);
77465+ atomic_inc_unchecked(&sk->sk_drops);
77466 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
77467 IS_UDPLITE(sk));
77468 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
77469@@ -844,6 +848,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
77470 goto discard;
77471
77472 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
77473+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77474+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
77475+#endif
77476 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
77477
77478 kfree_skb(skb);
77479@@ -1453,8 +1460,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
77480 0, 0L, 0,
77481 sock_i_uid(sp), 0,
77482 sock_i_ino(sp),
77483- atomic_read(&sp->sk_refcnt), sp,
77484- atomic_read(&sp->sk_drops));
77485+ atomic_read(&sp->sk_refcnt),
77486+#ifdef CONFIG_GRKERNSEC_HIDESYM
77487+ NULL,
77488+#else
77489+ sp,
77490+#endif
77491+ atomic_read_unchecked(&sp->sk_drops));
77492 }
77493
77494 int udp6_seq_show(struct seq_file *seq, void *v)
77495diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
77496index 6b9d5a0..4dffaf1 100644
77497--- a/net/irda/ircomm/ircomm_tty.c
77498+++ b/net/irda/ircomm/ircomm_tty.c
77499@@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
77500 add_wait_queue(&self->open_wait, &wait);
77501
77502 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
77503- __FILE__,__LINE__, tty->driver->name, self->open_count );
77504+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
77505
77506 /* As far as I can see, we protect open_count - Jean II */
77507 spin_lock_irqsave(&self->spinlock, flags);
77508 if (!tty_hung_up_p(filp)) {
77509 extra_count = 1;
77510- self->open_count--;
77511+ local_dec(&self->open_count);
77512 }
77513 spin_unlock_irqrestore(&self->spinlock, flags);
77514- self->blocked_open++;
77515+ local_inc(&self->blocked_open);
77516
77517 while (1) {
77518 if (tty->termios->c_cflag & CBAUD) {
77519@@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
77520 }
77521
77522 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
77523- __FILE__,__LINE__, tty->driver->name, self->open_count );
77524+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
77525
77526 schedule();
77527 }
77528@@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
77529 if (extra_count) {
77530 /* ++ is not atomic, so this should be protected - Jean II */
77531 spin_lock_irqsave(&self->spinlock, flags);
77532- self->open_count++;
77533+ local_inc(&self->open_count);
77534 spin_unlock_irqrestore(&self->spinlock, flags);
77535 }
77536- self->blocked_open--;
77537+ local_dec(&self->blocked_open);
77538
77539 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
77540- __FILE__,__LINE__, tty->driver->name, self->open_count);
77541+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
77542
77543 if (!retval)
77544 self->flags |= ASYNC_NORMAL_ACTIVE;
77545@@ -412,14 +412,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
77546 }
77547 /* ++ is not atomic, so this should be protected - Jean II */
77548 spin_lock_irqsave(&self->spinlock, flags);
77549- self->open_count++;
77550+ local_inc(&self->open_count);
77551
77552 tty->driver_data = self;
77553 self->tty = tty;
77554 spin_unlock_irqrestore(&self->spinlock, flags);
77555
77556 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
77557- self->line, self->open_count);
77558+ self->line, local_read(&self->open_count));
77559
77560 /* Not really used by us, but lets do it anyway */
77561 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
77562@@ -505,7 +505,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
77563 return;
77564 }
77565
77566- if ((tty->count == 1) && (self->open_count != 1)) {
77567+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
77568 /*
77569 * Uh, oh. tty->count is 1, which means that the tty
77570 * structure will be freed. state->count should always
77571@@ -515,16 +515,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
77572 */
77573 IRDA_DEBUG(0, "%s(), bad serial port count; "
77574 "tty->count is 1, state->count is %d\n", __func__ ,
77575- self->open_count);
77576- self->open_count = 1;
77577+ local_read(&self->open_count));
77578+ local_set(&self->open_count, 1);
77579 }
77580
77581- if (--self->open_count < 0) {
77582+ if (local_dec_return(&self->open_count) < 0) {
77583 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
77584- __func__, self->line, self->open_count);
77585- self->open_count = 0;
77586+ __func__, self->line, local_read(&self->open_count));
77587+ local_set(&self->open_count, 0);
77588 }
77589- if (self->open_count) {
77590+ if (local_read(&self->open_count)) {
77591 spin_unlock_irqrestore(&self->spinlock, flags);
77592
77593 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
77594@@ -556,7 +556,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
77595 tty->closing = 0;
77596 self->tty = NULL;
77597
77598- if (self->blocked_open) {
77599+ if (local_read(&self->blocked_open)) {
77600 if (self->close_delay)
77601 schedule_timeout_interruptible(self->close_delay);
77602 wake_up_interruptible(&self->open_wait);
77603@@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
77604 spin_lock_irqsave(&self->spinlock, flags);
77605 self->flags &= ~ASYNC_NORMAL_ACTIVE;
77606 self->tty = NULL;
77607- self->open_count = 0;
77608+ local_set(&self->open_count, 0);
77609 spin_unlock_irqrestore(&self->spinlock, flags);
77610
77611 wake_up_interruptible(&self->open_wait);
77612@@ -1355,7 +1355,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
77613 seq_putc(m, '\n');
77614
77615 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
77616- seq_printf(m, "Open count: %d\n", self->open_count);
77617+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
77618 seq_printf(m, "Max data size: %d\n", self->max_data_size);
77619 seq_printf(m, "Max header size: %d\n", self->max_header_size);
77620
77621diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
77622index cd6f7a9..e63fe89 100644
77623--- a/net/iucv/af_iucv.c
77624+++ b/net/iucv/af_iucv.c
77625@@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
77626
77627 write_lock_bh(&iucv_sk_list.lock);
77628
77629- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
77630+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
77631 while (__iucv_get_sock_by_name(name)) {
77632 sprintf(name, "%08x",
77633- atomic_inc_return(&iucv_sk_list.autobind_name));
77634+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
77635 }
77636
77637 write_unlock_bh(&iucv_sk_list.lock);
77638diff --git a/net/key/af_key.c b/net/key/af_key.c
77639index 34e4185..8823368 100644
77640--- a/net/key/af_key.c
77641+++ b/net/key/af_key.c
77642@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
77643 static u32 get_acqseq(void)
77644 {
77645 u32 res;
77646- static atomic_t acqseq;
77647+ static atomic_unchecked_t acqseq;
77648
77649 do {
77650- res = atomic_inc_return(&acqseq);
77651+ res = atomic_inc_return_unchecked(&acqseq);
77652 } while (!res);
77653 return res;
77654 }
77655diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
77656index 35e1e4b..9275471 100644
77657--- a/net/l2tp/l2tp_ip6.c
77658+++ b/net/l2tp/l2tp_ip6.c
77659@@ -410,6 +410,7 @@ static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
77660 lsa->l2tp_family = AF_INET6;
77661 lsa->l2tp_flowinfo = 0;
77662 lsa->l2tp_scope_id = 0;
77663+ lsa->l2tp_unused = 0;
77664 if (peer) {
77665 if (!lsk->peer_conn_id)
77666 return -ENOTCONN;
77667diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
77668index fe5453c..a13c3e23 100644
77669--- a/net/llc/af_llc.c
77670+++ b/net/llc/af_llc.c
77671@@ -969,14 +969,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
77672 struct sockaddr_llc sllc;
77673 struct sock *sk = sock->sk;
77674 struct llc_sock *llc = llc_sk(sk);
77675- int rc = 0;
77676+ int rc = -EBADF;
77677
77678 memset(&sllc, 0, sizeof(sllc));
77679 lock_sock(sk);
77680 if (sock_flag(sk, SOCK_ZAPPED))
77681 goto out;
77682 *uaddrlen = sizeof(sllc);
77683- memset(uaddr, 0, *uaddrlen);
77684 if (peer) {
77685 rc = -ENOTCONN;
77686 if (sk->sk_state != TCP_ESTABLISHED)
77687diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
77688index 3f3cd50..d2cf249 100644
77689--- a/net/mac80211/ieee80211_i.h
77690+++ b/net/mac80211/ieee80211_i.h
77691@@ -28,6 +28,7 @@
77692 #include <net/ieee80211_radiotap.h>
77693 #include <net/cfg80211.h>
77694 #include <net/mac80211.h>
77695+#include <asm/local.h>
77696 #include "key.h"
77697 #include "sta_info.h"
77698
77699@@ -863,7 +864,7 @@ struct ieee80211_local {
77700 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
77701 spinlock_t queue_stop_reason_lock;
77702
77703- int open_count;
77704+ local_t open_count;
77705 int monitors, cooked_mntrs;
77706 /* number of interfaces with corresponding FIF_ flags */
77707 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
77708diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
77709index 8664111..1d6a065 100644
77710--- a/net/mac80211/iface.c
77711+++ b/net/mac80211/iface.c
77712@@ -328,7 +328,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77713 break;
77714 }
77715
77716- if (local->open_count == 0) {
77717+ if (local_read(&local->open_count) == 0) {
77718 res = drv_start(local);
77719 if (res)
77720 goto err_del_bss;
77721@@ -371,7 +371,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77722 break;
77723 }
77724
77725- if (local->monitors == 0 && local->open_count == 0) {
77726+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
77727 res = ieee80211_add_virtual_monitor(local);
77728 if (res)
77729 goto err_stop;
77730@@ -468,7 +468,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77731 mutex_unlock(&local->mtx);
77732
77733 if (coming_up)
77734- local->open_count++;
77735+ local_inc(&local->open_count);
77736
77737 if (hw_reconf_flags)
77738 ieee80211_hw_config(local, hw_reconf_flags);
77739@@ -481,7 +481,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77740 err_del_interface:
77741 drv_remove_interface(local, sdata);
77742 err_stop:
77743- if (!local->open_count)
77744+ if (!local_read(&local->open_count))
77745 drv_stop(local);
77746 err_del_bss:
77747 sdata->bss = NULL;
77748@@ -613,7 +613,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
77749 }
77750
77751 if (going_down)
77752- local->open_count--;
77753+ local_dec(&local->open_count);
77754
77755 switch (sdata->vif.type) {
77756 case NL80211_IFTYPE_AP_VLAN:
77757@@ -685,7 +685,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
77758
77759 ieee80211_recalc_ps(local, -1);
77760
77761- if (local->open_count == 0) {
77762+ if (local_read(&local->open_count) == 0) {
77763 if (local->ops->napi_poll)
77764 napi_disable(&local->napi);
77765 ieee80211_clear_tx_pending(local);
77766@@ -717,7 +717,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
77767 }
77768 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
77769
77770- if (local->monitors == local->open_count && local->monitors > 0)
77771+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
77772 ieee80211_add_virtual_monitor(local);
77773 }
77774
77775diff --git a/net/mac80211/main.c b/net/mac80211/main.c
77776index f5548e9..474a15f 100644
77777--- a/net/mac80211/main.c
77778+++ b/net/mac80211/main.c
77779@@ -166,7 +166,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
77780 local->hw.conf.power_level = power;
77781 }
77782
77783- if (changed && local->open_count) {
77784+ if (changed && local_read(&local->open_count)) {
77785 ret = drv_config(local, changed);
77786 /*
77787 * Goal:
77788diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
77789index af1c4e2..24dbbe3 100644
77790--- a/net/mac80211/pm.c
77791+++ b/net/mac80211/pm.c
77792@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
77793 struct ieee80211_sub_if_data *sdata;
77794 struct sta_info *sta;
77795
77796- if (!local->open_count)
77797+ if (!local_read(&local->open_count))
77798 goto suspend;
77799
77800 ieee80211_scan_cancel(local);
77801@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
77802 cancel_work_sync(&local->dynamic_ps_enable_work);
77803 del_timer_sync(&local->dynamic_ps_timer);
77804
77805- local->wowlan = wowlan && local->open_count;
77806+ local->wowlan = wowlan && local_read(&local->open_count);
77807 if (local->wowlan) {
77808 int err = drv_suspend(local, wowlan);
77809 if (err < 0) {
77810@@ -132,7 +132,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
77811 drv_remove_interface(local, sdata);
77812
77813 /* stop hardware - this must stop RX */
77814- if (local->open_count)
77815+ if (local_read(&local->open_count))
77816 ieee80211_stop_device(local);
77817
77818 suspend:
77819diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
77820index 3313c11..bec9f17 100644
77821--- a/net/mac80211/rate.c
77822+++ b/net/mac80211/rate.c
77823@@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
77824
77825 ASSERT_RTNL();
77826
77827- if (local->open_count)
77828+ if (local_read(&local->open_count))
77829 return -EBUSY;
77830
77831 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
77832diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
77833index c97a065..ff61928 100644
77834--- a/net/mac80211/rc80211_pid_debugfs.c
77835+++ b/net/mac80211/rc80211_pid_debugfs.c
77836@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
77837
77838 spin_unlock_irqrestore(&events->lock, status);
77839
77840- if (copy_to_user(buf, pb, p))
77841+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
77842 return -EFAULT;
77843
77844 return p;
77845diff --git a/net/mac80211/util.c b/net/mac80211/util.c
77846index f564b5e..22fee47 100644
77847--- a/net/mac80211/util.c
77848+++ b/net/mac80211/util.c
77849@@ -1224,7 +1224,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
77850 }
77851 #endif
77852 /* everything else happens only if HW was up & running */
77853- if (!local->open_count)
77854+ if (!local_read(&local->open_count))
77855 goto wake_up;
77856
77857 /*
77858diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
77859index 209c1ed..39484dc 100644
77860--- a/net/netfilter/Kconfig
77861+++ b/net/netfilter/Kconfig
77862@@ -851,6 +851,16 @@ config NETFILTER_XT_MATCH_ESP
77863
77864 To compile it as a module, choose M here. If unsure, say N.
77865
77866+config NETFILTER_XT_MATCH_GRADM
77867+ tristate '"gradm" match support'
77868+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
77869+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
77870+ ---help---
77871+ The gradm match allows to match on grsecurity RBAC being enabled.
77872+ It is useful when iptables rules are applied early on bootup to
77873+ prevent connections to the machine (except from a trusted host)
77874+ while the RBAC system is disabled.
77875+
77876 config NETFILTER_XT_MATCH_HASHLIMIT
77877 tristate '"hashlimit" match support'
77878 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
77879diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
77880index 4e7960c..89e48d4 100644
77881--- a/net/netfilter/Makefile
77882+++ b/net/netfilter/Makefile
77883@@ -87,6 +87,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
77884 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
77885 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
77886 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
77887+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
77888 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
77889 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
77890 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
77891diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
77892index 1548df9..98ad9b4 100644
77893--- a/net/netfilter/ipvs/ip_vs_conn.c
77894+++ b/net/netfilter/ipvs/ip_vs_conn.c
77895@@ -557,7 +557,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
77896 /* Increase the refcnt counter of the dest */
77897 atomic_inc(&dest->refcnt);
77898
77899- conn_flags = atomic_read(&dest->conn_flags);
77900+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
77901 if (cp->protocol != IPPROTO_UDP)
77902 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
77903 flags = cp->flags;
77904@@ -902,7 +902,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
77905 atomic_set(&cp->refcnt, 1);
77906
77907 atomic_set(&cp->n_control, 0);
77908- atomic_set(&cp->in_pkts, 0);
77909+ atomic_set_unchecked(&cp->in_pkts, 0);
77910
77911 atomic_inc(&ipvs->conn_count);
77912 if (flags & IP_VS_CONN_F_NO_CPORT)
77913@@ -1183,7 +1183,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
77914
77915 /* Don't drop the entry if its number of incoming packets is not
77916 located in [0, 8] */
77917- i = atomic_read(&cp->in_pkts);
77918+ i = atomic_read_unchecked(&cp->in_pkts);
77919 if (i > 8 || i < 0) return 0;
77920
77921 if (!todrop_rate[i]) return 0;
77922diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
77923index a54b018c..07e0120 100644
77924--- a/net/netfilter/ipvs/ip_vs_core.c
77925+++ b/net/netfilter/ipvs/ip_vs_core.c
77926@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
77927 ret = cp->packet_xmit(skb, cp, pd->pp);
77928 /* do not touch skb anymore */
77929
77930- atomic_inc(&cp->in_pkts);
77931+ atomic_inc_unchecked(&cp->in_pkts);
77932 ip_vs_conn_put(cp);
77933 return ret;
77934 }
77935@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
77936 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
77937 pkts = sysctl_sync_threshold(ipvs);
77938 else
77939- pkts = atomic_add_return(1, &cp->in_pkts);
77940+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
77941
77942 if (ipvs->sync_state & IP_VS_STATE_MASTER)
77943 ip_vs_sync_conn(net, cp, pkts);
77944diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
77945index 84444dd..f91c066 100644
77946--- a/net/netfilter/ipvs/ip_vs_ctl.c
77947+++ b/net/netfilter/ipvs/ip_vs_ctl.c
77948@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
77949 ip_vs_rs_hash(ipvs, dest);
77950 write_unlock_bh(&ipvs->rs_lock);
77951 }
77952- atomic_set(&dest->conn_flags, conn_flags);
77953+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
77954
77955 /* bind the service */
77956 if (!dest->svc) {
77957@@ -2074,7 +2074,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
77958 " %-7s %-6d %-10d %-10d\n",
77959 &dest->addr.in6,
77960 ntohs(dest->port),
77961- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
77962+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
77963 atomic_read(&dest->weight),
77964 atomic_read(&dest->activeconns),
77965 atomic_read(&dest->inactconns));
77966@@ -2085,7 +2085,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
77967 "%-7s %-6d %-10d %-10d\n",
77968 ntohl(dest->addr.ip),
77969 ntohs(dest->port),
77970- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
77971+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
77972 atomic_read(&dest->weight),
77973 atomic_read(&dest->activeconns),
77974 atomic_read(&dest->inactconns));
77975@@ -2555,7 +2555,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
77976
77977 entry.addr = dest->addr.ip;
77978 entry.port = dest->port;
77979- entry.conn_flags = atomic_read(&dest->conn_flags);
77980+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
77981 entry.weight = atomic_read(&dest->weight);
77982 entry.u_threshold = dest->u_threshold;
77983 entry.l_threshold = dest->l_threshold;
77984@@ -2759,6 +2759,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
77985 {
77986 struct ip_vs_timeout_user t;
77987
77988+ memset(&t, 0, sizeof(t));
77989 __ip_vs_get_timeouts(net, &t);
77990 if (copy_to_user(user, &t, sizeof(t)) != 0)
77991 ret = -EFAULT;
77992@@ -3089,7 +3090,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
77993 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
77994 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
77995 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
77996- (atomic_read(&dest->conn_flags) &
77997+ (atomic_read_unchecked(&dest->conn_flags) &
77998 IP_VS_CONN_F_FWD_MASK)) ||
77999 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
78000 atomic_read(&dest->weight)) ||
78001diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
78002index effa10c..9058928 100644
78003--- a/net/netfilter/ipvs/ip_vs_sync.c
78004+++ b/net/netfilter/ipvs/ip_vs_sync.c
78005@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
78006 cp = cp->control;
78007 if (cp) {
78008 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
78009- pkts = atomic_add_return(1, &cp->in_pkts);
78010+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
78011 else
78012 pkts = sysctl_sync_threshold(ipvs);
78013 ip_vs_sync_conn(net, cp->control, pkts);
78014@@ -758,7 +758,7 @@ control:
78015 if (!cp)
78016 return;
78017 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
78018- pkts = atomic_add_return(1, &cp->in_pkts);
78019+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
78020 else
78021 pkts = sysctl_sync_threshold(ipvs);
78022 goto sloop;
78023@@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
78024
78025 if (opt)
78026 memcpy(&cp->in_seq, opt, sizeof(*opt));
78027- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
78028+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
78029 cp->state = state;
78030 cp->old_state = cp->state;
78031 /*
78032diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
78033index 7fd66de..e6fb361 100644
78034--- a/net/netfilter/ipvs/ip_vs_xmit.c
78035+++ b/net/netfilter/ipvs/ip_vs_xmit.c
78036@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
78037 else
78038 rc = NF_ACCEPT;
78039 /* do not touch skb anymore */
78040- atomic_inc(&cp->in_pkts);
78041+ atomic_inc_unchecked(&cp->in_pkts);
78042 goto out;
78043 }
78044
78045@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
78046 else
78047 rc = NF_ACCEPT;
78048 /* do not touch skb anymore */
78049- atomic_inc(&cp->in_pkts);
78050+ atomic_inc_unchecked(&cp->in_pkts);
78051 goto out;
78052 }
78053
78054diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
78055index ac3af97..c134c21 100644
78056--- a/net/netfilter/nf_conntrack_core.c
78057+++ b/net/netfilter/nf_conntrack_core.c
78058@@ -1530,6 +1530,10 @@ err_proto:
78059 #define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
78060 #define DYING_NULLS_VAL ((1<<30)+1)
78061
78062+#ifdef CONFIG_GRKERNSEC_HIDESYM
78063+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
78064+#endif
78065+
78066 static int nf_conntrack_init_net(struct net *net)
78067 {
78068 int ret;
78069@@ -1543,7 +1547,11 @@ static int nf_conntrack_init_net(struct net *net)
78070 goto err_stat;
78071 }
78072
78073+#ifdef CONFIG_GRKERNSEC_HIDESYM
78074+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
78075+#else
78076 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
78077+#endif
78078 if (!net->ct.slabname) {
78079 ret = -ENOMEM;
78080 goto err_slabname;
78081diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
78082index 3c3cfc0..7a6ea1a 100644
78083--- a/net/netfilter/nfnetlink_log.c
78084+++ b/net/netfilter/nfnetlink_log.c
78085@@ -70,7 +70,7 @@ struct nfulnl_instance {
78086 };
78087
78088 static DEFINE_SPINLOCK(instances_lock);
78089-static atomic_t global_seq;
78090+static atomic_unchecked_t global_seq;
78091
78092 #define INSTANCE_BUCKETS 16
78093 static struct hlist_head instance_table[INSTANCE_BUCKETS];
78094@@ -517,7 +517,7 @@ __build_packet_message(struct nfulnl_instance *inst,
78095 /* global sequence number */
78096 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
78097 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
78098- htonl(atomic_inc_return(&global_seq))))
78099+ htonl(atomic_inc_return_unchecked(&global_seq))))
78100 goto nla_put_failure;
78101
78102 if (data_len) {
78103diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
78104new file mode 100644
78105index 0000000..6905327
78106--- /dev/null
78107+++ b/net/netfilter/xt_gradm.c
78108@@ -0,0 +1,51 @@
78109+/*
78110+ * gradm match for netfilter
78111