]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9-3.3.1-201204081847.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-3.3.1-201204081847.patch
CommitLineData
d40be39a
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 0c083c5..bf13011 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,9 +51,11 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37@@ -69,6 +74,7 @@ Image
38 Module.markers
39 Module.symvers
40 PENDING
41+PERF*
42 SCCS
43 System.map*
44 TAGS
45@@ -92,19 +98,24 @@ bounds.h
46 bsetup
47 btfixupprep
48 build
49+builtin-policy.h
50 bvmlinux
51 bzImage*
52 capability_names.h
53 capflags.c
54 classlist.h*
55+clut_vga16.c
56+common-cmds.h
57 comp*.log
58 compile.h*
59 conf
60 config
61 config-*
62 config_data.h*
63+config.c
64 config.mak
65 config.mak.autogen
66+config.tmp
67 conmakehash
68 consolemap_deftbl.c*
69 cpustr.h
70@@ -115,9 +126,11 @@ devlist.h*
71 dnotify_test
72 docproc
73 dslm
74+dtc-lexer.lex.c
75 elf2ecoff
76 elfconfig.h*
77 evergreen_reg_safe.h
78+exception_policy.conf
79 fixdep
80 flask.h
81 fore200e_mkfirm
82@@ -125,12 +138,15 @@ fore200e_pca_fw.c*
83 gconf
84 gconf.glade.h
85 gen-devlist
86+gen-kdb_cmds.c
87 gen_crc32table
88 gen_init_cpio
89 generated
90 genheaders
91 genksyms
92 *_gray256.c
93+hash
94+hid-example
95 hpet_example
96 hugepage-mmap
97 hugepage-shm
98@@ -145,7 +161,7 @@ int32.c
99 int4.c
100 int8.c
101 kallsyms
102-kconfig
103+kern_constants.h
104 keywords.c
105 ksym.c*
106 ksym.h*
107@@ -153,7 +169,7 @@ kxgettext
108 lkc_defs.h
109 lex.c
110 lex.*.c
111-linux
112+lib1funcs.S
113 logo_*.c
114 logo_*_clut224.c
115 logo_*_mono.c
116@@ -165,14 +181,15 @@ machtypes.h
117 map
118 map_hugetlb
119 maui_boot.h
120-media
121 mconf
122+mdp
123 miboot*
124 mk_elfconfig
125 mkboot
126 mkbugboot
127 mkcpustr
128 mkdep
129+mkpiggy
130 mkprep
131 mkregtable
132 mktables
133@@ -208,6 +225,7 @@ r300_reg_safe.h
134 r420_reg_safe.h
135 r600_reg_safe.h
136 recordmcount
137+regdb.c
138 relocs
139 rlim_names.h
140 rn50_reg_safe.h
141@@ -218,6 +236,7 @@ setup
142 setup.bin
143 setup.elf
144 sImage
145+slabinfo
146 sm_tbl*
147 split-include
148 syscalltab.h
149@@ -228,6 +247,7 @@ tftpboot.img
150 timeconst.h
151 times.h*
152 trix_boot.h
153+user_constants.h
154 utsrelease.h*
155 vdso-syms.lds
156 vdso.lds
157@@ -245,7 +265,9 @@ vmlinux
158 vmlinux-*
159 vmlinux.aout
160 vmlinux.bin.all
161+vmlinux.bin.bz2
162 vmlinux.lds
163+vmlinux.relocs
164 vmlinuz
165 voffset.h
166 vsyscall.lds
167@@ -253,9 +275,11 @@ vsyscall_32.lds
168 wanxlfw.inc
169 uImage
170 unifdef
171+utsrelease.h
172 wakeup.bin
173 wakeup.elf
174 wakeup.lds
175 zImage*
176 zconf.hash.c
177+zconf.lex.c
178 zoffset.h
179diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
180index d99fd9c..8689fef 100644
181--- a/Documentation/kernel-parameters.txt
182+++ b/Documentation/kernel-parameters.txt
183@@ -1977,6 +1977,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
184 the specified number of seconds. This is to be used if
185 your oopses keep scrolling off the screen.
186
187+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
188+ virtualization environments that don't cope well with the
189+ expand down segment used by UDEREF on X86-32 or the frequent
190+ page table updates on X86-64.
191+
192+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
193+
194 pcbit= [HW,ISDN]
195
196 pcd. [PARIDE]
197diff --git a/Makefile b/Makefile
198index 026a227..990f035 100644
199--- a/Makefile
200+++ b/Makefile
201@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
202
203 HOSTCC = gcc
204 HOSTCXX = g++
205-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
206-HOSTCXXFLAGS = -O2
207+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
208+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
209+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
210
211 # Decide whether to build built-in, modular, or both.
212 # Normally, just do built-in.
213@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
214 # Rules shared between *config targets and build targets
215
216 # Basic helpers built in scripts/
217-PHONY += scripts_basic
218-scripts_basic:
219+PHONY += scripts_basic gcc-plugins
220+scripts_basic: gcc-plugins
221 $(Q)$(MAKE) $(build)=scripts/basic
222 $(Q)rm -f .tmp_quiet_recordmcount
223
224@@ -564,6 +565,53 @@ else
225 KBUILD_CFLAGS += -O2
226 endif
227
228+ifndef DISABLE_PAX_PLUGINS
229+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
230+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
231+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
232+endif
233+ifdef CONFIG_PAX_MEMORY_STACKLEAK
234+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
235+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
236+endif
237+ifdef CONFIG_KALLOCSTAT_PLUGIN
238+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
239+endif
240+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
241+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
242+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
243+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
244+endif
245+ifdef CONFIG_CHECKER_PLUGIN
246+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
247+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
248+endif
249+endif
250+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
251+ifdef CONFIG_PAX_SIZE_OVERFLOW
252+SIZE_OVERFLOW_PLUGIN := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
253+endif
254+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
255+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN)
256+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
257+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
258+ifeq ($(KBUILD_EXTMOD),)
259+gcc-plugins:
260+ $(Q)$(MAKE) $(build)=tools/gcc
261+else
262+gcc-plugins: ;
263+endif
264+else
265+gcc-plugins:
266+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
267+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
268+else
269+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
270+endif
271+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
272+endif
273+endif
274+
275 include $(srctree)/arch/$(SRCARCH)/Makefile
276
277 ifneq ($(CONFIG_FRAME_WARN),0)
278@@ -708,7 +756,7 @@ export mod_strip_cmd
279
280
281 ifeq ($(KBUILD_EXTMOD),)
282-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
283+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
284
285 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
286 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
287@@ -932,6 +980,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
288
289 # The actual objects are generated when descending,
290 # make sure no implicit rule kicks in
291+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
292+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
293 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
294
295 # Handle descending into subdirectories listed in $(vmlinux-dirs)
296@@ -941,7 +991,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
297 # Error messages still appears in the original language
298
299 PHONY += $(vmlinux-dirs)
300-$(vmlinux-dirs): prepare scripts
301+$(vmlinux-dirs): gcc-plugins prepare scripts
302 $(Q)$(MAKE) $(build)=$@
303
304 # Store (new) KERNELRELASE string in include/config/kernel.release
305@@ -985,6 +1035,7 @@ prepare0: archprepare FORCE
306 $(Q)$(MAKE) $(build)=.
307
308 # All the preparing..
309+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
310 prepare: prepare0
311
312 # Generate some files
313@@ -1089,6 +1140,8 @@ all: modules
314 # using awk while concatenating to the final file.
315
316 PHONY += modules
317+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
318+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
319 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
320 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
321 @$(kecho) ' Building modules, stage 2.';
322@@ -1104,7 +1157,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
323
324 # Target to prepare building external modules
325 PHONY += modules_prepare
326-modules_prepare: prepare scripts
327+modules_prepare: gcc-plugins prepare scripts
328
329 # Target to install modules
330 PHONY += modules_install
331@@ -1201,6 +1254,7 @@ distclean: mrproper
332 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
333 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
334 -o -name '.*.rej' \
335+ -o -name '.*.rej' -o -name '*.so' \
336 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
337 -type f -print | xargs rm -f
338
339@@ -1361,6 +1415,8 @@ PHONY += $(module-dirs) modules
340 $(module-dirs): crmodverdir $(objtree)/Module.symvers
341 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
342
343+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
344+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
345 modules: $(module-dirs)
346 @$(kecho) ' Building modules, stage 2.';
347 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
348@@ -1487,17 +1543,21 @@ else
349 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
350 endif
351
352-%.s: %.c prepare scripts FORCE
353+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
354+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
355+%.s: %.c gcc-plugins prepare scripts FORCE
356 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
357 %.i: %.c prepare scripts FORCE
358 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
359-%.o: %.c prepare scripts FORCE
360+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
361+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
362+%.o: %.c gcc-plugins prepare scripts FORCE
363 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
364 %.lst: %.c prepare scripts FORCE
365 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
366-%.s: %.S prepare scripts FORCE
367+%.s: %.S gcc-plugins prepare scripts FORCE
368 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
369-%.o: %.S prepare scripts FORCE
370+%.o: %.S gcc-plugins prepare scripts FORCE
371 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
372 %.symtypes: %.c prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374@@ -1507,11 +1567,15 @@ endif
375 $(cmd_crmodverdir)
376 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
377 $(build)=$(build-dir)
378-%/: prepare scripts FORCE
379+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
380+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
381+%/: gcc-plugins prepare scripts FORCE
382 $(cmd_crmodverdir)
383 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
384 $(build)=$(build-dir)
385-%.ko: prepare scripts FORCE
386+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
387+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
388+%.ko: gcc-plugins prepare scripts FORCE
389 $(cmd_crmodverdir)
390 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
391 $(build)=$(build-dir) $(@:.ko=.o)
392diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
393index 640f909..48b6597 100644
394--- a/arch/alpha/include/asm/atomic.h
395+++ b/arch/alpha/include/asm/atomic.h
396@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
397 #define atomic_dec(v) atomic_sub(1,(v))
398 #define atomic64_dec(v) atomic64_sub(1,(v))
399
400+#define atomic64_read_unchecked(v) atomic64_read(v)
401+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
402+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
403+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
404+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
405+#define atomic64_inc_unchecked(v) atomic64_inc(v)
406+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
407+#define atomic64_dec_unchecked(v) atomic64_dec(v)
408+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
409+
410 #define smp_mb__before_atomic_dec() smp_mb()
411 #define smp_mb__after_atomic_dec() smp_mb()
412 #define smp_mb__before_atomic_inc() smp_mb()
413diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
414index ad368a9..fbe0f25 100644
415--- a/arch/alpha/include/asm/cache.h
416+++ b/arch/alpha/include/asm/cache.h
417@@ -4,19 +4,19 @@
418 #ifndef __ARCH_ALPHA_CACHE_H
419 #define __ARCH_ALPHA_CACHE_H
420
421+#include <linux/const.h>
422
423 /* Bytes per L1 (data) cache line. */
424 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
425-# define L1_CACHE_BYTES 64
426 # define L1_CACHE_SHIFT 6
427 #else
428 /* Both EV4 and EV5 are write-through, read-allocate,
429 direct-mapped, physical.
430 */
431-# define L1_CACHE_BYTES 32
432 # define L1_CACHE_SHIFT 5
433 #endif
434
435+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
436 #define SMP_CACHE_BYTES L1_CACHE_BYTES
437
438 #endif
439diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
440index da5449e..7418343 100644
441--- a/arch/alpha/include/asm/elf.h
442+++ b/arch/alpha/include/asm/elf.h
443@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
444
445 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
446
447+#ifdef CONFIG_PAX_ASLR
448+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
449+
450+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
451+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
452+#endif
453+
454 /* $0 is set by ld.so to a pointer to a function which might be
455 registered using atexit. This provides a mean for the dynamic
456 linker to call DT_FINI functions for shared libraries that have
457diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
458index de98a73..bd4f1f8 100644
459--- a/arch/alpha/include/asm/pgtable.h
460+++ b/arch/alpha/include/asm/pgtable.h
461@@ -101,6 +101,17 @@ struct vm_area_struct;
462 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
463 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
464 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
465+
466+#ifdef CONFIG_PAX_PAGEEXEC
467+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
468+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
469+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
470+#else
471+# define PAGE_SHARED_NOEXEC PAGE_SHARED
472+# define PAGE_COPY_NOEXEC PAGE_COPY
473+# define PAGE_READONLY_NOEXEC PAGE_READONLY
474+#endif
475+
476 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
477
478 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
479diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
480index 2fd00b7..cfd5069 100644
481--- a/arch/alpha/kernel/module.c
482+++ b/arch/alpha/kernel/module.c
483@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
484
485 /* The small sections were sorted to the end of the segment.
486 The following should definitely cover them. */
487- gp = (u64)me->module_core + me->core_size - 0x8000;
488+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
489 got = sechdrs[me->arch.gotsecindex].sh_addr;
490
491 for (i = 0; i < n; i++) {
492diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
493index 01e8715..be0e80f 100644
494--- a/arch/alpha/kernel/osf_sys.c
495+++ b/arch/alpha/kernel/osf_sys.c
496@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
497 /* At this point: (!vma || addr < vma->vm_end). */
498 if (limit - len < addr)
499 return -ENOMEM;
500- if (!vma || addr + len <= vma->vm_start)
501+ if (check_heap_stack_gap(vma, addr, len))
502 return addr;
503 addr = vma->vm_end;
504 vma = vma->vm_next;
505@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
506 merely specific addresses, but regions of memory -- perhaps
507 this feature should be incorporated into all ports? */
508
509+#ifdef CONFIG_PAX_RANDMMAP
510+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
511+#endif
512+
513 if (addr) {
514 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
515 if (addr != (unsigned long) -ENOMEM)
516@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
517 }
518
519 /* Next, try allocating at TASK_UNMAPPED_BASE. */
520- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
521- len, limit);
522+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
523+
524 if (addr != (unsigned long) -ENOMEM)
525 return addr;
526
527diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
528index fadd5f8..904e73a 100644
529--- a/arch/alpha/mm/fault.c
530+++ b/arch/alpha/mm/fault.c
531@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
532 __reload_thread(pcb);
533 }
534
535+#ifdef CONFIG_PAX_PAGEEXEC
536+/*
537+ * PaX: decide what to do with offenders (regs->pc = fault address)
538+ *
539+ * returns 1 when task should be killed
540+ * 2 when patched PLT trampoline was detected
541+ * 3 when unpatched PLT trampoline was detected
542+ */
543+static int pax_handle_fetch_fault(struct pt_regs *regs)
544+{
545+
546+#ifdef CONFIG_PAX_EMUPLT
547+ int err;
548+
549+ do { /* PaX: patched PLT emulation #1 */
550+ unsigned int ldah, ldq, jmp;
551+
552+ err = get_user(ldah, (unsigned int *)regs->pc);
553+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
554+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
555+
556+ if (err)
557+ break;
558+
559+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
560+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
561+ jmp == 0x6BFB0000U)
562+ {
563+ unsigned long r27, addr;
564+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
565+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
566+
567+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
568+ err = get_user(r27, (unsigned long *)addr);
569+ if (err)
570+ break;
571+
572+ regs->r27 = r27;
573+ regs->pc = r27;
574+ return 2;
575+ }
576+ } while (0);
577+
578+ do { /* PaX: patched PLT emulation #2 */
579+ unsigned int ldah, lda, br;
580+
581+ err = get_user(ldah, (unsigned int *)regs->pc);
582+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
583+ err |= get_user(br, (unsigned int *)(regs->pc+8));
584+
585+ if (err)
586+ break;
587+
588+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
589+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
590+ (br & 0xFFE00000U) == 0xC3E00000U)
591+ {
592+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
593+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
594+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
595+
596+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
597+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
598+ return 2;
599+ }
600+ } while (0);
601+
602+ do { /* PaX: unpatched PLT emulation */
603+ unsigned int br;
604+
605+ err = get_user(br, (unsigned int *)regs->pc);
606+
607+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
608+ unsigned int br2, ldq, nop, jmp;
609+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
610+
611+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
612+ err = get_user(br2, (unsigned int *)addr);
613+ err |= get_user(ldq, (unsigned int *)(addr+4));
614+ err |= get_user(nop, (unsigned int *)(addr+8));
615+ err |= get_user(jmp, (unsigned int *)(addr+12));
616+ err |= get_user(resolver, (unsigned long *)(addr+16));
617+
618+ if (err)
619+ break;
620+
621+ if (br2 == 0xC3600000U &&
622+ ldq == 0xA77B000CU &&
623+ nop == 0x47FF041FU &&
624+ jmp == 0x6B7B0000U)
625+ {
626+ regs->r28 = regs->pc+4;
627+ regs->r27 = addr+16;
628+ regs->pc = resolver;
629+ return 3;
630+ }
631+ }
632+ } while (0);
633+#endif
634+
635+ return 1;
636+}
637+
638+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
639+{
640+ unsigned long i;
641+
642+ printk(KERN_ERR "PAX: bytes at PC: ");
643+ for (i = 0; i < 5; i++) {
644+ unsigned int c;
645+ if (get_user(c, (unsigned int *)pc+i))
646+ printk(KERN_CONT "???????? ");
647+ else
648+ printk(KERN_CONT "%08x ", c);
649+ }
650+ printk("\n");
651+}
652+#endif
653
654 /*
655 * This routine handles page faults. It determines the address,
656@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
657 good_area:
658 si_code = SEGV_ACCERR;
659 if (cause < 0) {
660- if (!(vma->vm_flags & VM_EXEC))
661+ if (!(vma->vm_flags & VM_EXEC)) {
662+
663+#ifdef CONFIG_PAX_PAGEEXEC
664+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
665+ goto bad_area;
666+
667+ up_read(&mm->mmap_sem);
668+ switch (pax_handle_fetch_fault(regs)) {
669+
670+#ifdef CONFIG_PAX_EMUPLT
671+ case 2:
672+ case 3:
673+ return;
674+#endif
675+
676+ }
677+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
678+ do_group_exit(SIGKILL);
679+#else
680 goto bad_area;
681+#endif
682+
683+ }
684 } else if (!cause) {
685 /* Allow reads even for write-only mappings */
686 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
687diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
688index 86976d0..8e07f84 100644
689--- a/arch/arm/include/asm/atomic.h
690+++ b/arch/arm/include/asm/atomic.h
691@@ -15,6 +15,10 @@
692 #include <linux/types.h>
693 #include <asm/system.h>
694
695+#ifdef CONFIG_GENERIC_ATOMIC64
696+#include <asm-generic/atomic64.h>
697+#endif
698+
699 #define ATOMIC_INIT(i) { (i) }
700
701 #ifdef __KERNEL__
702@@ -25,7 +29,15 @@
703 * atomic_set() is the clrex or dummy strex done on every exception return.
704 */
705 #define atomic_read(v) (*(volatile int *)&(v)->counter)
706+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
707+{
708+ return v->counter;
709+}
710 #define atomic_set(v,i) (((v)->counter) = (i))
711+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
712+{
713+ v->counter = i;
714+}
715
716 #if __LINUX_ARM_ARCH__ >= 6
717
718@@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
719 int result;
720
721 __asm__ __volatile__("@ atomic_add\n"
722+"1: ldrex %1, [%3]\n"
723+" adds %0, %1, %4\n"
724+
725+#ifdef CONFIG_PAX_REFCOUNT
726+" bvc 3f\n"
727+"2: bkpt 0xf103\n"
728+"3:\n"
729+#endif
730+
731+" strex %1, %0, [%3]\n"
732+" teq %1, #0\n"
733+" bne 1b"
734+
735+#ifdef CONFIG_PAX_REFCOUNT
736+"\n4:\n"
737+ _ASM_EXTABLE(2b, 4b)
738+#endif
739+
740+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
741+ : "r" (&v->counter), "Ir" (i)
742+ : "cc");
743+}
744+
745+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
746+{
747+ unsigned long tmp;
748+ int result;
749+
750+ __asm__ __volatile__("@ atomic_add_unchecked\n"
751 "1: ldrex %0, [%3]\n"
752 " add %0, %0, %4\n"
753 " strex %1, %0, [%3]\n"
754@@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
755 smp_mb();
756
757 __asm__ __volatile__("@ atomic_add_return\n"
758+"1: ldrex %1, [%3]\n"
759+" adds %0, %1, %4\n"
760+
761+#ifdef CONFIG_PAX_REFCOUNT
762+" bvc 3f\n"
763+" mov %0, %1\n"
764+"2: bkpt 0xf103\n"
765+"3:\n"
766+#endif
767+
768+" strex %1, %0, [%3]\n"
769+" teq %1, #0\n"
770+" bne 1b"
771+
772+#ifdef CONFIG_PAX_REFCOUNT
773+"\n4:\n"
774+ _ASM_EXTABLE(2b, 4b)
775+#endif
776+
777+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
778+ : "r" (&v->counter), "Ir" (i)
779+ : "cc");
780+
781+ smp_mb();
782+
783+ return result;
784+}
785+
786+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
787+{
788+ unsigned long tmp;
789+ int result;
790+
791+ smp_mb();
792+
793+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
794 "1: ldrex %0, [%3]\n"
795 " add %0, %0, %4\n"
796 " strex %1, %0, [%3]\n"
797@@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
798 int result;
799
800 __asm__ __volatile__("@ atomic_sub\n"
801+"1: ldrex %1, [%3]\n"
802+" subs %0, %1, %4\n"
803+
804+#ifdef CONFIG_PAX_REFCOUNT
805+" bvc 3f\n"
806+"2: bkpt 0xf103\n"
807+"3:\n"
808+#endif
809+
810+" strex %1, %0, [%3]\n"
811+" teq %1, #0\n"
812+" bne 1b"
813+
814+#ifdef CONFIG_PAX_REFCOUNT
815+"\n4:\n"
816+ _ASM_EXTABLE(2b, 4b)
817+#endif
818+
819+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
820+ : "r" (&v->counter), "Ir" (i)
821+ : "cc");
822+}
823+
824+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
825+{
826+ unsigned long tmp;
827+ int result;
828+
829+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
830 "1: ldrex %0, [%3]\n"
831 " sub %0, %0, %4\n"
832 " strex %1, %0, [%3]\n"
833@@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
834 smp_mb();
835
836 __asm__ __volatile__("@ atomic_sub_return\n"
837-"1: ldrex %0, [%3]\n"
838-" sub %0, %0, %4\n"
839+"1: ldrex %1, [%3]\n"
840+" sub %0, %1, %4\n"
841+
842+#ifdef CONFIG_PAX_REFCOUNT
843+" bvc 3f\n"
844+" mov %0, %1\n"
845+"2: bkpt 0xf103\n"
846+"3:\n"
847+#endif
848+
849 " strex %1, %0, [%3]\n"
850 " teq %1, #0\n"
851 " bne 1b"
852+
853+#ifdef CONFIG_PAX_REFCOUNT
854+"\n4:\n"
855+ _ASM_EXTABLE(2b, 4b)
856+#endif
857+
858 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
859 : "r" (&v->counter), "Ir" (i)
860 : "cc");
861@@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
862 return oldval;
863 }
864
865+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
866+{
867+ unsigned long oldval, res;
868+
869+ smp_mb();
870+
871+ do {
872+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
873+ "ldrex %1, [%3]\n"
874+ "mov %0, #0\n"
875+ "teq %1, %4\n"
876+ "strexeq %0, %5, [%3]\n"
877+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
878+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
879+ : "cc");
880+ } while (res);
881+
882+ smp_mb();
883+
884+ return oldval;
885+}
886+
887 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
888 {
889 unsigned long tmp, tmp2;
890@@ -165,7 +307,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
891
892 return val;
893 }
894+#define atomic_add_return_unchecked(i, v) atomic_add_return(i, v)
895 #define atomic_add(i, v) (void) atomic_add_return(i, v)
896+#define atomic_add_unchecked(i, v) (void) atomic_add_return_unchecked(i, v)
897
898 static inline int atomic_sub_return(int i, atomic_t *v)
899 {
900@@ -179,7 +323,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
901
902 return val;
903 }
904+#define atomic_sub_return_unchecked(i, v) atomic_sub_return(i, v)
905 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
906+#define atomic_sub_unchecked(i, v) (void) atomic_sub_return_unchecked(i, v)
907
908 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
909 {
910@@ -194,6 +340,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
911
912 return ret;
913 }
914+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg(v, o, n)
915
916 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
917 {
918@@ -207,6 +354,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
919 #endif /* __LINUX_ARM_ARCH__ */
920
921 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
922+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
923+{
924+ return xchg(&v->counter, new);
925+}
926
927 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
928 {
929@@ -219,11 +370,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
930 }
931
932 #define atomic_inc(v) atomic_add(1, v)
933+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
934+{
935+ atomic_add_unchecked(1, v);
936+}
937 #define atomic_dec(v) atomic_sub(1, v)
938+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
939+{
940+ atomic_sub_unchecked(1, v);
941+}
942
943 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
944+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
945+{
946+ return atomic_add_return_unchecked(1, v) == 0;
947+}
948 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
949 #define atomic_inc_return(v) (atomic_add_return(1, v))
950+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
951+{
952+ return atomic_add_return_unchecked(1, v);
953+}
954 #define atomic_dec_return(v) (atomic_sub_return(1, v))
955 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
956
957@@ -239,6 +406,14 @@ typedef struct {
958 u64 __aligned(8) counter;
959 } atomic64_t;
960
961+#ifdef CONFIG_PAX_REFCOUNT
962+typedef struct {
963+ u64 __aligned(8) counter;
964+} atomic64_unchecked_t;
965+#else
966+typedef atomic64_t atomic64_unchecked_t;
967+#endif
968+
969 #define ATOMIC64_INIT(i) { (i) }
970
971 static inline u64 atomic64_read(atomic64_t *v)
972@@ -254,6 +429,19 @@ static inline u64 atomic64_read(atomic64_t *v)
973 return result;
974 }
975
976+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
977+{
978+ u64 result;
979+
980+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
981+" ldrexd %0, %H0, [%1]"
982+ : "=&r" (result)
983+ : "r" (&v->counter), "Qo" (v->counter)
984+ );
985+
986+ return result;
987+}
988+
989 static inline void atomic64_set(atomic64_t *v, u64 i)
990 {
991 u64 tmp;
992@@ -268,6 +456,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
993 : "cc");
994 }
995
996+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
997+{
998+ u64 tmp;
999+
1000+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1001+"1: ldrexd %0, %H0, [%2]\n"
1002+" strexd %0, %3, %H3, [%2]\n"
1003+" teq %0, #0\n"
1004+" bne 1b"
1005+ : "=&r" (tmp), "=Qo" (v->counter)
1006+ : "r" (&v->counter), "r" (i)
1007+ : "cc");
1008+}
1009+
1010 static inline void atomic64_add(u64 i, atomic64_t *v)
1011 {
1012 u64 result;
1013@@ -276,6 +478,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1014 __asm__ __volatile__("@ atomic64_add\n"
1015 "1: ldrexd %0, %H0, [%3]\n"
1016 " adds %0, %0, %4\n"
1017+" adcs %H0, %H0, %H4\n"
1018+
1019+#ifdef CONFIG_PAX_REFCOUNT
1020+" bvc 3f\n"
1021+"2: bkpt 0xf103\n"
1022+"3:\n"
1023+#endif
1024+
1025+" strexd %1, %0, %H0, [%3]\n"
1026+" teq %1, #0\n"
1027+" bne 1b"
1028+
1029+#ifdef CONFIG_PAX_REFCOUNT
1030+"\n4:\n"
1031+ _ASM_EXTABLE(2b, 4b)
1032+#endif
1033+
1034+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1035+ : "r" (&v->counter), "r" (i)
1036+ : "cc");
1037+}
1038+
1039+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1040+{
1041+ u64 result;
1042+ unsigned long tmp;
1043+
1044+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1045+"1: ldrexd %0, %H0, [%3]\n"
1046+" adds %0, %0, %4\n"
1047 " adc %H0, %H0, %H4\n"
1048 " strexd %1, %0, %H0, [%3]\n"
1049 " teq %1, #0\n"
1050@@ -287,12 +519,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1051
1052 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1053 {
1054- u64 result;
1055- unsigned long tmp;
1056+ u64 result, tmp;
1057
1058 smp_mb();
1059
1060 __asm__ __volatile__("@ atomic64_add_return\n"
1061+"1: ldrexd %1, %H1, [%3]\n"
1062+" adds %0, %1, %4\n"
1063+" adcs %H0, %H1, %H4\n"
1064+
1065+#ifdef CONFIG_PAX_REFCOUNT
1066+" bvc 3f\n"
1067+" mov %0, %1\n"
1068+" mov %H0, %H1\n"
1069+"2: bkpt 0xf103\n"
1070+"3:\n"
1071+#endif
1072+
1073+" strexd %1, %0, %H0, [%3]\n"
1074+" teq %1, #0\n"
1075+" bne 1b"
1076+
1077+#ifdef CONFIG_PAX_REFCOUNT
1078+"\n4:\n"
1079+ _ASM_EXTABLE(2b, 4b)
1080+#endif
1081+
1082+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1083+ : "r" (&v->counter), "r" (i)
1084+ : "cc");
1085+
1086+ smp_mb();
1087+
1088+ return result;
1089+}
1090+
1091+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1092+{
1093+ u64 result;
1094+ unsigned long tmp;
1095+
1096+ smp_mb();
1097+
1098+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1099 "1: ldrexd %0, %H0, [%3]\n"
1100 " adds %0, %0, %4\n"
1101 " adc %H0, %H0, %H4\n"
1102@@ -316,6 +585,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1103 __asm__ __volatile__("@ atomic64_sub\n"
1104 "1: ldrexd %0, %H0, [%3]\n"
1105 " subs %0, %0, %4\n"
1106+" sbcs %H0, %H0, %H4\n"
1107+
1108+#ifdef CONFIG_PAX_REFCOUNT
1109+" bvc 3f\n"
1110+"2: bkpt 0xf103\n"
1111+"3:\n"
1112+#endif
1113+
1114+" strexd %1, %0, %H0, [%3]\n"
1115+" teq %1, #0\n"
1116+" bne 1b"
1117+
1118+#ifdef CONFIG_PAX_REFCOUNT
1119+"\n4:\n"
1120+ _ASM_EXTABLE(2b, 4b)
1121+#endif
1122+
1123+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1124+ : "r" (&v->counter), "r" (i)
1125+ : "cc");
1126+}
1127+
1128+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1129+{
1130+ u64 result;
1131+ unsigned long tmp;
1132+
1133+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1134+"1: ldrexd %0, %H0, [%3]\n"
1135+" subs %0, %0, %4\n"
1136 " sbc %H0, %H0, %H4\n"
1137 " strexd %1, %0, %H0, [%3]\n"
1138 " teq %1, #0\n"
1139@@ -327,18 +626,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1140
1141 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1142 {
1143- u64 result;
1144- unsigned long tmp;
1145+ u64 result, tmp;
1146
1147 smp_mb();
1148
1149 __asm__ __volatile__("@ atomic64_sub_return\n"
1150-"1: ldrexd %0, %H0, [%3]\n"
1151-" subs %0, %0, %4\n"
1152-" sbc %H0, %H0, %H4\n"
1153+"1: ldrexd %1, %H1, [%3]\n"
1154+" subs %0, %1, %4\n"
1155+" sbc %H0, %H1, %H4\n"
1156+
1157+#ifdef CONFIG_PAX_REFCOUNT
1158+" bvc 3f\n"
1159+" mov %0, %1\n"
1160+" mov %H0, %H1\n"
1161+"2: bkpt 0xf103\n"
1162+"3:\n"
1163+#endif
1164+
1165 " strexd %1, %0, %H0, [%3]\n"
1166 " teq %1, #0\n"
1167 " bne 1b"
1168+
1169+#ifdef CONFIG_PAX_REFCOUNT
1170+"\n4:\n"
1171+ _ASM_EXTABLE(2b, 4b)
1172+#endif
1173+
1174 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1175 : "r" (&v->counter), "r" (i)
1176 : "cc");
1177@@ -372,6 +685,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1178 return oldval;
1179 }
1180
1181+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1182+{
1183+ u64 oldval;
1184+ unsigned long res;
1185+
1186+ smp_mb();
1187+
1188+ do {
1189+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1190+ "ldrexd %1, %H1, [%3]\n"
1191+ "mov %0, #0\n"
1192+ "teq %1, %4\n"
1193+ "teqeq %H1, %H4\n"
1194+ "strexdeq %0, %5, %H5, [%3]"
1195+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1196+ : "r" (&ptr->counter), "r" (old), "r" (new)
1197+ : "cc");
1198+ } while (res);
1199+
1200+ smp_mb();
1201+
1202+ return oldval;
1203+}
1204+
1205 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1206 {
1207 u64 result;
1208@@ -395,21 +732,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1209
1210 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1211 {
1212- u64 result;
1213- unsigned long tmp;
1214+ u64 result, tmp;
1215
1216 smp_mb();
1217
1218 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1219-"1: ldrexd %0, %H0, [%3]\n"
1220-" subs %0, %0, #1\n"
1221-" sbc %H0, %H0, #0\n"
1222+"1: ldrexd %1, %H1, [%3]\n"
1223+" subs %0, %1, #1\n"
1224+" sbc %H0, %H1, #0\n"
1225+
1226+#ifdef CONFIG_PAX_REFCOUNT
1227+" bvc 3f\n"
1228+" mov %0, %1\n"
1229+" mov %H0, %H1\n"
1230+"2: bkpt 0xf103\n"
1231+"3:\n"
1232+#endif
1233+
1234 " teq %H0, #0\n"
1235-" bmi 2f\n"
1236+" bmi 4f\n"
1237 " strexd %1, %0, %H0, [%3]\n"
1238 " teq %1, #0\n"
1239 " bne 1b\n"
1240-"2:"
1241+"4:\n"
1242+
1243+#ifdef CONFIG_PAX_REFCOUNT
1244+ _ASM_EXTABLE(2b, 4b)
1245+#endif
1246+
1247 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1248 : "r" (&v->counter)
1249 : "cc");
1250@@ -432,13 +782,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1251 " teq %0, %5\n"
1252 " teqeq %H0, %H5\n"
1253 " moveq %1, #0\n"
1254-" beq 2f\n"
1255+" beq 4f\n"
1256 " adds %0, %0, %6\n"
1257 " adc %H0, %H0, %H6\n"
1258+
1259+#ifdef CONFIG_PAX_REFCOUNT
1260+" bvc 3f\n"
1261+"2: bkpt 0xf103\n"
1262+"3:\n"
1263+#endif
1264+
1265 " strexd %2, %0, %H0, [%4]\n"
1266 " teq %2, #0\n"
1267 " bne 1b\n"
1268-"2:"
1269+"4:\n"
1270+
1271+#ifdef CONFIG_PAX_REFCOUNT
1272+ _ASM_EXTABLE(2b, 4b)
1273+#endif
1274+
1275 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1276 : "r" (&v->counter), "r" (u), "r" (a)
1277 : "cc");
1278@@ -451,10 +813,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1279
1280 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1281 #define atomic64_inc(v) atomic64_add(1LL, (v))
1282+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1283 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1284+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1285 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1286 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1287 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1288+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1289 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1290 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1291 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1292diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1293index 75fe66b..2255c86 100644
1294--- a/arch/arm/include/asm/cache.h
1295+++ b/arch/arm/include/asm/cache.h
1296@@ -4,8 +4,10 @@
1297 #ifndef __ASMARM_CACHE_H
1298 #define __ASMARM_CACHE_H
1299
1300+#include <linux/const.h>
1301+
1302 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1303-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1304+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1305
1306 /*
1307 * Memory returned by kmalloc() may be used for DMA, so we must make
1308diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1309index d5d8d5c..ad92c96 100644
1310--- a/arch/arm/include/asm/cacheflush.h
1311+++ b/arch/arm/include/asm/cacheflush.h
1312@@ -108,7 +108,7 @@ struct cpu_cache_fns {
1313 void (*dma_unmap_area)(const void *, size_t, int);
1314
1315 void (*dma_flush_range)(const void *, const void *);
1316-};
1317+} __no_const;
1318
1319 /*
1320 * Select the calling method
1321diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1322index 0e9ce8d..6ef1e03 100644
1323--- a/arch/arm/include/asm/elf.h
1324+++ b/arch/arm/include/asm/elf.h
1325@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1326 the loader. We need to make sure that it is out of the way of the program
1327 that it will "exec", and that there is sufficient room for the brk. */
1328
1329-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1330+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1331+
1332+#ifdef CONFIG_PAX_ASLR
1333+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1334+
1335+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1336+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1337+#endif
1338
1339 /* When the program starts, a1 contains a pointer to a function to be
1340 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1341@@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1342 extern void elf_set_personality(const struct elf32_hdr *);
1343 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1344
1345-struct mm_struct;
1346-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1347-#define arch_randomize_brk arch_randomize_brk
1348-
1349 extern int vectors_user_mapping(void);
1350 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
1351 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
1352diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1353index e51b1e8..32a3113 100644
1354--- a/arch/arm/include/asm/kmap_types.h
1355+++ b/arch/arm/include/asm/kmap_types.h
1356@@ -21,6 +21,7 @@ enum km_type {
1357 KM_L1_CACHE,
1358 KM_L2_CACHE,
1359 KM_KDB,
1360+ KM_CLEARPAGE,
1361 KM_TYPE_NR
1362 };
1363
1364diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1365index 53426c6..c7baff3 100644
1366--- a/arch/arm/include/asm/outercache.h
1367+++ b/arch/arm/include/asm/outercache.h
1368@@ -35,7 +35,7 @@ struct outer_cache_fns {
1369 #endif
1370 void (*set_debug)(unsigned long);
1371 void (*resume)(void);
1372-};
1373+} __no_const;
1374
1375 #ifdef CONFIG_OUTER_CACHE
1376
1377diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1378index 97b440c..b7ff179 100644
1379--- a/arch/arm/include/asm/page.h
1380+++ b/arch/arm/include/asm/page.h
1381@@ -123,7 +123,7 @@ struct cpu_user_fns {
1382 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1383 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1384 unsigned long vaddr, struct vm_area_struct *vma);
1385-};
1386+} __no_const;
1387
1388 #ifdef MULTI_USER
1389 extern struct cpu_user_fns cpu_user;
1390diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1391index e4c96cc..1145653 100644
1392--- a/arch/arm/include/asm/system.h
1393+++ b/arch/arm/include/asm/system.h
1394@@ -98,6 +98,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
1395
1396 #define xchg(ptr,x) \
1397 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1398+#define xchg_unchecked(ptr,x) \
1399+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1400
1401 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1402
1403@@ -534,6 +536,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1404
1405 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1406
1407+#define _ASM_EXTABLE(from, to) \
1408+" .pushsection __ex_table,\"a\"\n"\
1409+" .align 3\n" \
1410+" .long " #from ", " #to"\n" \
1411+" .popsection"
1412+
1413+
1414 #endif /* __ASSEMBLY__ */
1415
1416 #define arch_align_stack(x) (x)
1417diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1418index 2958976..12ccac4 100644
1419--- a/arch/arm/include/asm/uaccess.h
1420+++ b/arch/arm/include/asm/uaccess.h
1421@@ -22,6 +22,8 @@
1422 #define VERIFY_READ 0
1423 #define VERIFY_WRITE 1
1424
1425+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1426+
1427 /*
1428 * The exception table consists of pairs of addresses: the first is the
1429 * address of an instruction that is allowed to fault, and the second is
1430@@ -387,8 +389,23 @@ do { \
1431
1432
1433 #ifdef CONFIG_MMU
1434-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1435-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1436+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1437+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1438+
1439+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1440+{
1441+ if (!__builtin_constant_p(n))
1442+ check_object_size(to, n, false);
1443+ return ___copy_from_user(to, from, n);
1444+}
1445+
1446+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1447+{
1448+ if (!__builtin_constant_p(n))
1449+ check_object_size(from, n, true);
1450+ return ___copy_to_user(to, from, n);
1451+}
1452+
1453 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1454 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1455 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1456@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1457
1458 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1459 {
1460+ if ((long)n < 0)
1461+ return n;
1462+
1463 if (access_ok(VERIFY_READ, from, n))
1464 n = __copy_from_user(to, from, n);
1465 else /* security hole - plug it */
1466@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1467
1468 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1469 {
1470+ if ((long)n < 0)
1471+ return n;
1472+
1473 if (access_ok(VERIFY_WRITE, to, n))
1474 n = __copy_to_user(to, from, n);
1475 return n;
1476diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1477index 5b0bce6..becd81c 100644
1478--- a/arch/arm/kernel/armksyms.c
1479+++ b/arch/arm/kernel/armksyms.c
1480@@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1481 #ifdef CONFIG_MMU
1482 EXPORT_SYMBOL(copy_page);
1483
1484-EXPORT_SYMBOL(__copy_from_user);
1485-EXPORT_SYMBOL(__copy_to_user);
1486+EXPORT_SYMBOL(___copy_from_user);
1487+EXPORT_SYMBOL(___copy_to_user);
1488 EXPORT_SYMBOL(__clear_user);
1489
1490 EXPORT_SYMBOL(__get_user_1);
1491diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1492index 971d65c..cc936fb 100644
1493--- a/arch/arm/kernel/process.c
1494+++ b/arch/arm/kernel/process.c
1495@@ -28,7 +28,6 @@
1496 #include <linux/tick.h>
1497 #include <linux/utsname.h>
1498 #include <linux/uaccess.h>
1499-#include <linux/random.h>
1500 #include <linux/hw_breakpoint.h>
1501 #include <linux/cpuidle.h>
1502
1503@@ -273,9 +272,10 @@ void machine_power_off(void)
1504 machine_shutdown();
1505 if (pm_power_off)
1506 pm_power_off();
1507+ BUG();
1508 }
1509
1510-void machine_restart(char *cmd)
1511+__noreturn void machine_restart(char *cmd)
1512 {
1513 machine_shutdown();
1514
1515@@ -517,12 +517,6 @@ unsigned long get_wchan(struct task_struct *p)
1516 return 0;
1517 }
1518
1519-unsigned long arch_randomize_brk(struct mm_struct *mm)
1520-{
1521- unsigned long range_end = mm->brk + 0x02000000;
1522- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1523-}
1524-
1525 #ifdef CONFIG_MMU
1526 /*
1527 * The vectors page is always readable from user space for the
1528diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1529index a255c39..4a19b25 100644
1530--- a/arch/arm/kernel/setup.c
1531+++ b/arch/arm/kernel/setup.c
1532@@ -109,13 +109,13 @@ struct processor processor __read_mostly;
1533 struct cpu_tlb_fns cpu_tlb __read_mostly;
1534 #endif
1535 #ifdef MULTI_USER
1536-struct cpu_user_fns cpu_user __read_mostly;
1537+struct cpu_user_fns cpu_user __read_only;
1538 #endif
1539 #ifdef MULTI_CACHE
1540-struct cpu_cache_fns cpu_cache __read_mostly;
1541+struct cpu_cache_fns cpu_cache __read_only;
1542 #endif
1543 #ifdef CONFIG_OUTER_CACHE
1544-struct outer_cache_fns outer_cache __read_mostly;
1545+struct outer_cache_fns outer_cache __read_only;
1546 EXPORT_SYMBOL(outer_cache);
1547 #endif
1548
1549diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1550index f84dfe6..13e94f7 100644
1551--- a/arch/arm/kernel/traps.c
1552+++ b/arch/arm/kernel/traps.c
1553@@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1554
1555 static DEFINE_RAW_SPINLOCK(die_lock);
1556
1557+extern void gr_handle_kernel_exploit(void);
1558+
1559 /*
1560 * This function is protected against re-entrancy.
1561 */
1562@@ -291,6 +293,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1563 panic("Fatal exception in interrupt");
1564 if (panic_on_oops)
1565 panic("Fatal exception");
1566+
1567+ gr_handle_kernel_exploit();
1568+
1569 if (ret != NOTIFY_STOP)
1570 do_exit(SIGSEGV);
1571 }
1572diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1573index 66a477a..bee61d3 100644
1574--- a/arch/arm/lib/copy_from_user.S
1575+++ b/arch/arm/lib/copy_from_user.S
1576@@ -16,7 +16,7 @@
1577 /*
1578 * Prototype:
1579 *
1580- * size_t __copy_from_user(void *to, const void *from, size_t n)
1581+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
1582 *
1583 * Purpose:
1584 *
1585@@ -84,11 +84,11 @@
1586
1587 .text
1588
1589-ENTRY(__copy_from_user)
1590+ENTRY(___copy_from_user)
1591
1592 #include "copy_template.S"
1593
1594-ENDPROC(__copy_from_user)
1595+ENDPROC(___copy_from_user)
1596
1597 .pushsection .fixup,"ax"
1598 .align 0
1599diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1600index 6ee2f67..d1cce76 100644
1601--- a/arch/arm/lib/copy_page.S
1602+++ b/arch/arm/lib/copy_page.S
1603@@ -10,6 +10,7 @@
1604 * ASM optimised string functions
1605 */
1606 #include <linux/linkage.h>
1607+#include <linux/const.h>
1608 #include <asm/assembler.h>
1609 #include <asm/asm-offsets.h>
1610 #include <asm/cache.h>
1611diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1612index d066df6..df28194 100644
1613--- a/arch/arm/lib/copy_to_user.S
1614+++ b/arch/arm/lib/copy_to_user.S
1615@@ -16,7 +16,7 @@
1616 /*
1617 * Prototype:
1618 *
1619- * size_t __copy_to_user(void *to, const void *from, size_t n)
1620+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
1621 *
1622 * Purpose:
1623 *
1624@@ -88,11 +88,11 @@
1625 .text
1626
1627 ENTRY(__copy_to_user_std)
1628-WEAK(__copy_to_user)
1629+WEAK(___copy_to_user)
1630
1631 #include "copy_template.S"
1632
1633-ENDPROC(__copy_to_user)
1634+ENDPROC(___copy_to_user)
1635 ENDPROC(__copy_to_user_std)
1636
1637 .pushsection .fixup,"ax"
1638diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1639index 5c908b1..e712687 100644
1640--- a/arch/arm/lib/uaccess.S
1641+++ b/arch/arm/lib/uaccess.S
1642@@ -20,7 +20,7 @@
1643
1644 #define PAGE_SHIFT 12
1645
1646-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1647+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1648 * Purpose : copy a block to user memory from kernel memory
1649 * Params : to - user memory
1650 * : from - kernel memory
1651@@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1652 sub r2, r2, ip
1653 b .Lc2u_dest_aligned
1654
1655-ENTRY(__copy_to_user)
1656+ENTRY(___copy_to_user)
1657 stmfd sp!, {r2, r4 - r7, lr}
1658 cmp r2, #4
1659 blt .Lc2u_not_enough
1660@@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1661 ldrgtb r3, [r1], #0
1662 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1663 b .Lc2u_finished
1664-ENDPROC(__copy_to_user)
1665+ENDPROC(___copy_to_user)
1666
1667 .pushsection .fixup,"ax"
1668 .align 0
1669 9001: ldmfd sp!, {r0, r4 - r7, pc}
1670 .popsection
1671
1672-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1673+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1674 * Purpose : copy a block from user memory to kernel memory
1675 * Params : to - kernel memory
1676 * : from - user memory
1677@@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1678 sub r2, r2, ip
1679 b .Lcfu_dest_aligned
1680
1681-ENTRY(__copy_from_user)
1682+ENTRY(___copy_from_user)
1683 stmfd sp!, {r0, r2, r4 - r7, lr}
1684 cmp r2, #4
1685 blt .Lcfu_not_enough
1686@@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1687 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1688 strgtb r3, [r0], #1
1689 b .Lcfu_finished
1690-ENDPROC(__copy_from_user)
1691+ENDPROC(___copy_from_user)
1692
1693 .pushsection .fixup,"ax"
1694 .align 0
1695diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1696index 025f742..8432b08 100644
1697--- a/arch/arm/lib/uaccess_with_memcpy.c
1698+++ b/arch/arm/lib/uaccess_with_memcpy.c
1699@@ -104,7 +104,7 @@ out:
1700 }
1701
1702 unsigned long
1703-__copy_to_user(void __user *to, const void *from, unsigned long n)
1704+___copy_to_user(void __user *to, const void *from, unsigned long n)
1705 {
1706 /*
1707 * This test is stubbed out of the main function above to keep
1708diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1709index 6722627..8f97548c 100644
1710--- a/arch/arm/mach-omap2/board-n8x0.c
1711+++ b/arch/arm/mach-omap2/board-n8x0.c
1712@@ -597,7 +597,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1713 }
1714 #endif
1715
1716-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1717+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1718 .late_init = n8x0_menelaus_late_init,
1719 };
1720
1721diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
1722index 2b2d51c..0127490 100644
1723--- a/arch/arm/mach-ux500/mbox-db5500.c
1724+++ b/arch/arm/mach-ux500/mbox-db5500.c
1725@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
1726 return sprintf(buf, "0x%X\n", mbox_value);
1727 }
1728
1729-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1730+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1731
1732 static int mbox_show(struct seq_file *s, void *data)
1733 {
1734diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1735index bb7eac3..3bade16 100644
1736--- a/arch/arm/mm/fault.c
1737+++ b/arch/arm/mm/fault.c
1738@@ -172,6 +172,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1739 }
1740 #endif
1741
1742+#ifdef CONFIG_PAX_PAGEEXEC
1743+ if (fsr & FSR_LNX_PF) {
1744+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1745+ do_group_exit(SIGKILL);
1746+ }
1747+#endif
1748+
1749 tsk->thread.address = addr;
1750 tsk->thread.error_code = fsr;
1751 tsk->thread.trap_no = 14;
1752@@ -393,6 +400,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1753 }
1754 #endif /* CONFIG_MMU */
1755
1756+#ifdef CONFIG_PAX_PAGEEXEC
1757+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1758+{
1759+ long i;
1760+
1761+ printk(KERN_ERR "PAX: bytes at PC: ");
1762+ for (i = 0; i < 20; i++) {
1763+ unsigned char c;
1764+ if (get_user(c, (__force unsigned char __user *)pc+i))
1765+ printk(KERN_CONT "?? ");
1766+ else
1767+ printk(KERN_CONT "%02x ", c);
1768+ }
1769+ printk("\n");
1770+
1771+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1772+ for (i = -1; i < 20; i++) {
1773+ unsigned long c;
1774+ if (get_user(c, (__force unsigned long __user *)sp+i))
1775+ printk(KERN_CONT "???????? ");
1776+ else
1777+ printk(KERN_CONT "%08lx ", c);
1778+ }
1779+ printk("\n");
1780+}
1781+#endif
1782+
1783 /*
1784 * First Level Translation Fault Handler
1785 *
1786@@ -573,6 +607,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1787 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1788 struct siginfo info;
1789
1790+#ifdef CONFIG_PAX_REFCOUNT
1791+ if (fsr_fs(ifsr) == 2) {
1792+ unsigned int bkpt;
1793+
1794+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1795+ current->thread.error_code = ifsr;
1796+ current->thread.trap_no = 0;
1797+ pax_report_refcount_overflow(regs);
1798+ fixup_exception(regs);
1799+ return;
1800+ }
1801+ }
1802+#endif
1803+
1804 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1805 return;
1806
1807diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1808index ce8cb19..3ec539d 100644
1809--- a/arch/arm/mm/mmap.c
1810+++ b/arch/arm/mm/mmap.c
1811@@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1812 if (len > TASK_SIZE)
1813 return -ENOMEM;
1814
1815+#ifdef CONFIG_PAX_RANDMMAP
1816+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1817+#endif
1818+
1819 if (addr) {
1820 if (do_align)
1821 addr = COLOUR_ALIGN(addr, pgoff);
1822@@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1823 addr = PAGE_ALIGN(addr);
1824
1825 vma = find_vma(mm, addr);
1826- if (TASK_SIZE - len >= addr &&
1827- (!vma || addr + len <= vma->vm_start))
1828+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1829 return addr;
1830 }
1831 if (len > mm->cached_hole_size) {
1832- start_addr = addr = mm->free_area_cache;
1833+ start_addr = addr = mm->free_area_cache;
1834 } else {
1835- start_addr = addr = mm->mmap_base;
1836- mm->cached_hole_size = 0;
1837+ start_addr = addr = mm->mmap_base;
1838+ mm->cached_hole_size = 0;
1839 }
1840
1841 full_search:
1842@@ -124,14 +127,14 @@ full_search:
1843 * Start a new search - just in case we missed
1844 * some holes.
1845 */
1846- if (start_addr != TASK_UNMAPPED_BASE) {
1847- start_addr = addr = TASK_UNMAPPED_BASE;
1848+ if (start_addr != mm->mmap_base) {
1849+ start_addr = addr = mm->mmap_base;
1850 mm->cached_hole_size = 0;
1851 goto full_search;
1852 }
1853 return -ENOMEM;
1854 }
1855- if (!vma || addr + len <= vma->vm_start) {
1856+ if (check_heap_stack_gap(vma, addr, len)) {
1857 /*
1858 * Remember the place where we stopped the search:
1859 */
1860@@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1861
1862 if (mmap_is_legacy()) {
1863 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
1864+
1865+#ifdef CONFIG_PAX_RANDMMAP
1866+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1867+ mm->mmap_base += mm->delta_mmap;
1868+#endif
1869+
1870 mm->get_unmapped_area = arch_get_unmapped_area;
1871 mm->unmap_area = arch_unmap_area;
1872 } else {
1873 mm->mmap_base = mmap_base(random_factor);
1874+
1875+#ifdef CONFIG_PAX_RANDMMAP
1876+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1877+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
1878+#endif
1879+
1880 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
1881 mm->unmap_area = arch_unmap_area_topdown;
1882 }
1883diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
1884index 71a6827..e7fbc23 100644
1885--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
1886+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
1887@@ -43,7 +43,7 @@ struct samsung_dma_ops {
1888 int (*started)(unsigned ch);
1889 int (*flush)(unsigned ch);
1890 int (*stop)(unsigned ch);
1891-};
1892+} __no_const;
1893
1894 extern void *samsung_dmadev_get_ops(void);
1895 extern void *s3c_dma_get_ops(void);
1896diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
1897index 5f28cae..3d23723 100644
1898--- a/arch/arm/plat-samsung/include/plat/ehci.h
1899+++ b/arch/arm/plat-samsung/include/plat/ehci.h
1900@@ -14,7 +14,7 @@
1901 struct s5p_ehci_platdata {
1902 int (*phy_init)(struct platform_device *pdev, int type);
1903 int (*phy_exit)(struct platform_device *pdev, int type);
1904-};
1905+} __no_const;
1906
1907 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
1908
1909diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
1910index c3a58a1..78fbf54 100644
1911--- a/arch/avr32/include/asm/cache.h
1912+++ b/arch/avr32/include/asm/cache.h
1913@@ -1,8 +1,10 @@
1914 #ifndef __ASM_AVR32_CACHE_H
1915 #define __ASM_AVR32_CACHE_H
1916
1917+#include <linux/const.h>
1918+
1919 #define L1_CACHE_SHIFT 5
1920-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1921+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1922
1923 /*
1924 * Memory returned by kmalloc() may be used for DMA, so we must make
1925diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1926index 3b3159b..425ea94 100644
1927--- a/arch/avr32/include/asm/elf.h
1928+++ b/arch/avr32/include/asm/elf.h
1929@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1930 the loader. We need to make sure that it is out of the way of the program
1931 that it will "exec", and that there is sufficient room for the brk. */
1932
1933-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1934+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1935
1936+#ifdef CONFIG_PAX_ASLR
1937+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1938+
1939+#define PAX_DELTA_MMAP_LEN 15
1940+#define PAX_DELTA_STACK_LEN 15
1941+#endif
1942
1943 /* This yields a mask that user programs can use to figure out what
1944 instruction set this CPU supports. This could be done in user space,
1945diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1946index b7f5c68..556135c 100644
1947--- a/arch/avr32/include/asm/kmap_types.h
1948+++ b/arch/avr32/include/asm/kmap_types.h
1949@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1950 D(11) KM_IRQ1,
1951 D(12) KM_SOFTIRQ0,
1952 D(13) KM_SOFTIRQ1,
1953-D(14) KM_TYPE_NR
1954+D(14) KM_CLEARPAGE,
1955+D(15) KM_TYPE_NR
1956 };
1957
1958 #undef D
1959diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1960index f7040a1..db9f300 100644
1961--- a/arch/avr32/mm/fault.c
1962+++ b/arch/avr32/mm/fault.c
1963@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1964
1965 int exception_trace = 1;
1966
1967+#ifdef CONFIG_PAX_PAGEEXEC
1968+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1969+{
1970+ unsigned long i;
1971+
1972+ printk(KERN_ERR "PAX: bytes at PC: ");
1973+ for (i = 0; i < 20; i++) {
1974+ unsigned char c;
1975+ if (get_user(c, (unsigned char *)pc+i))
1976+ printk(KERN_CONT "???????? ");
1977+ else
1978+ printk(KERN_CONT "%02x ", c);
1979+ }
1980+ printk("\n");
1981+}
1982+#endif
1983+
1984 /*
1985 * This routine handles page faults. It determines the address and the
1986 * problem, and then passes it off to one of the appropriate routines.
1987@@ -156,6 +173,16 @@ bad_area:
1988 up_read(&mm->mmap_sem);
1989
1990 if (user_mode(regs)) {
1991+
1992+#ifdef CONFIG_PAX_PAGEEXEC
1993+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1994+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1995+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1996+ do_group_exit(SIGKILL);
1997+ }
1998+ }
1999+#endif
2000+
2001 if (exception_trace && printk_ratelimit())
2002 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2003 "sp %08lx ecr %lu\n",
2004diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2005index 568885a..f8008df 100644
2006--- a/arch/blackfin/include/asm/cache.h
2007+++ b/arch/blackfin/include/asm/cache.h
2008@@ -7,6 +7,7 @@
2009 #ifndef __ARCH_BLACKFIN_CACHE_H
2010 #define __ARCH_BLACKFIN_CACHE_H
2011
2012+#include <linux/const.h>
2013 #include <linux/linkage.h> /* for asmlinkage */
2014
2015 /*
2016@@ -14,7 +15,7 @@
2017 * Blackfin loads 32 bytes for cache
2018 */
2019 #define L1_CACHE_SHIFT 5
2020-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2021+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2022 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2023
2024 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2025diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2026index aea2718..3639a60 100644
2027--- a/arch/cris/include/arch-v10/arch/cache.h
2028+++ b/arch/cris/include/arch-v10/arch/cache.h
2029@@ -1,8 +1,9 @@
2030 #ifndef _ASM_ARCH_CACHE_H
2031 #define _ASM_ARCH_CACHE_H
2032
2033+#include <linux/const.h>
2034 /* Etrax 100LX have 32-byte cache-lines. */
2035-#define L1_CACHE_BYTES 32
2036 #define L1_CACHE_SHIFT 5
2037+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2038
2039 #endif /* _ASM_ARCH_CACHE_H */
2040diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2041index 1de779f..336fad3 100644
2042--- a/arch/cris/include/arch-v32/arch/cache.h
2043+++ b/arch/cris/include/arch-v32/arch/cache.h
2044@@ -1,11 +1,12 @@
2045 #ifndef _ASM_CRIS_ARCH_CACHE_H
2046 #define _ASM_CRIS_ARCH_CACHE_H
2047
2048+#include <linux/const.h>
2049 #include <arch/hwregs/dma.h>
2050
2051 /* A cache-line is 32 bytes. */
2052-#define L1_CACHE_BYTES 32
2053 #define L1_CACHE_SHIFT 5
2054+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2055
2056 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2057
2058diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2059index 0d8a7d6..d0c9ff5 100644
2060--- a/arch/frv/include/asm/atomic.h
2061+++ b/arch/frv/include/asm/atomic.h
2062@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
2063 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2064 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2065
2066+#define atomic64_read_unchecked(v) atomic64_read(v)
2067+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2068+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2069+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2070+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2071+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2072+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2073+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2074+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2075+
2076 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2077 {
2078 int c, old;
2079diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2080index 2797163..c2a401d 100644
2081--- a/arch/frv/include/asm/cache.h
2082+++ b/arch/frv/include/asm/cache.h
2083@@ -12,10 +12,11 @@
2084 #ifndef __ASM_CACHE_H
2085 #define __ASM_CACHE_H
2086
2087+#include <linux/const.h>
2088
2089 /* bytes per L1 cache line */
2090 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2091-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2092+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2093
2094 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2095 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2096diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2097index f8e16b2..c73ff79 100644
2098--- a/arch/frv/include/asm/kmap_types.h
2099+++ b/arch/frv/include/asm/kmap_types.h
2100@@ -23,6 +23,7 @@ enum km_type {
2101 KM_IRQ1,
2102 KM_SOFTIRQ0,
2103 KM_SOFTIRQ1,
2104+ KM_CLEARPAGE,
2105 KM_TYPE_NR
2106 };
2107
2108diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2109index 385fd30..6c3d97e 100644
2110--- a/arch/frv/mm/elf-fdpic.c
2111+++ b/arch/frv/mm/elf-fdpic.c
2112@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2113 if (addr) {
2114 addr = PAGE_ALIGN(addr);
2115 vma = find_vma(current->mm, addr);
2116- if (TASK_SIZE - len >= addr &&
2117- (!vma || addr + len <= vma->vm_start))
2118+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2119 goto success;
2120 }
2121
2122@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2123 for (; vma; vma = vma->vm_next) {
2124 if (addr > limit)
2125 break;
2126- if (addr + len <= vma->vm_start)
2127+ if (check_heap_stack_gap(vma, addr, len))
2128 goto success;
2129 addr = vma->vm_end;
2130 }
2131@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2132 for (; vma; vma = vma->vm_next) {
2133 if (addr > limit)
2134 break;
2135- if (addr + len <= vma->vm_start)
2136+ if (check_heap_stack_gap(vma, addr, len))
2137 goto success;
2138 addr = vma->vm_end;
2139 }
2140diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2141index c635028..6d9445a 100644
2142--- a/arch/h8300/include/asm/cache.h
2143+++ b/arch/h8300/include/asm/cache.h
2144@@ -1,8 +1,10 @@
2145 #ifndef __ARCH_H8300_CACHE_H
2146 #define __ARCH_H8300_CACHE_H
2147
2148+#include <linux/const.h>
2149+
2150 /* bytes per L1 cache line */
2151-#define L1_CACHE_BYTES 4
2152+#define L1_CACHE_BYTES _AC(4,UL)
2153
2154 /* m68k-elf-gcc 2.95.2 doesn't like these */
2155
2156diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2157index 0f01de2..d37d309 100644
2158--- a/arch/hexagon/include/asm/cache.h
2159+++ b/arch/hexagon/include/asm/cache.h
2160@@ -21,9 +21,11 @@
2161 #ifndef __ASM_CACHE_H
2162 #define __ASM_CACHE_H
2163
2164+#include <linux/const.h>
2165+
2166 /* Bytes per L1 cache line */
2167-#define L1_CACHE_SHIFT (5)
2168-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2169+#define L1_CACHE_SHIFT 5
2170+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2171
2172 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2173 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2174diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2175index 3fad89e..3047da5 100644
2176--- a/arch/ia64/include/asm/atomic.h
2177+++ b/arch/ia64/include/asm/atomic.h
2178@@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2179 #define atomic64_inc(v) atomic64_add(1, (v))
2180 #define atomic64_dec(v) atomic64_sub(1, (v))
2181
2182+#define atomic64_read_unchecked(v) atomic64_read(v)
2183+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2184+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2185+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2186+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2187+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2188+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2189+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2190+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2191+
2192 /* Atomic operations are already serializing */
2193 #define smp_mb__before_atomic_dec() barrier()
2194 #define smp_mb__after_atomic_dec() barrier()
2195diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2196index 988254a..e1ee885 100644
2197--- a/arch/ia64/include/asm/cache.h
2198+++ b/arch/ia64/include/asm/cache.h
2199@@ -1,6 +1,7 @@
2200 #ifndef _ASM_IA64_CACHE_H
2201 #define _ASM_IA64_CACHE_H
2202
2203+#include <linux/const.h>
2204
2205 /*
2206 * Copyright (C) 1998-2000 Hewlett-Packard Co
2207@@ -9,7 +10,7 @@
2208
2209 /* Bytes per L1 (data) cache line. */
2210 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2211-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2212+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2213
2214 #ifdef CONFIG_SMP
2215 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2216diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2217index b5298eb..67c6e62 100644
2218--- a/arch/ia64/include/asm/elf.h
2219+++ b/arch/ia64/include/asm/elf.h
2220@@ -42,6 +42,13 @@
2221 */
2222 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2223
2224+#ifdef CONFIG_PAX_ASLR
2225+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2226+
2227+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2228+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2229+#endif
2230+
2231 #define PT_IA_64_UNWIND 0x70000001
2232
2233 /* IA-64 relocations: */
2234diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2235index 1a97af3..7529d31 100644
2236--- a/arch/ia64/include/asm/pgtable.h
2237+++ b/arch/ia64/include/asm/pgtable.h
2238@@ -12,7 +12,7 @@
2239 * David Mosberger-Tang <davidm@hpl.hp.com>
2240 */
2241
2242-
2243+#include <linux/const.h>
2244 #include <asm/mman.h>
2245 #include <asm/page.h>
2246 #include <asm/processor.h>
2247@@ -143,6 +143,17 @@
2248 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2249 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2250 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2251+
2252+#ifdef CONFIG_PAX_PAGEEXEC
2253+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2254+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2255+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2256+#else
2257+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2258+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2259+# define PAGE_COPY_NOEXEC PAGE_COPY
2260+#endif
2261+
2262 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2263 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2264 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2265diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2266index b77768d..e0795eb 100644
2267--- a/arch/ia64/include/asm/spinlock.h
2268+++ b/arch/ia64/include/asm/spinlock.h
2269@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2270 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2271
2272 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2273- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2274+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2275 }
2276
2277 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2278diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2279index 449c8c0..432a3d2 100644
2280--- a/arch/ia64/include/asm/uaccess.h
2281+++ b/arch/ia64/include/asm/uaccess.h
2282@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2283 const void *__cu_from = (from); \
2284 long __cu_len = (n); \
2285 \
2286- if (__access_ok(__cu_to, __cu_len, get_fs())) \
2287+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2288 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2289 __cu_len; \
2290 })
2291@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2292 long __cu_len = (n); \
2293 \
2294 __chk_user_ptr(__cu_from); \
2295- if (__access_ok(__cu_from, __cu_len, get_fs())) \
2296+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2297 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2298 __cu_len; \
2299 })
2300diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2301index 24603be..948052d 100644
2302--- a/arch/ia64/kernel/module.c
2303+++ b/arch/ia64/kernel/module.c
2304@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2305 void
2306 module_free (struct module *mod, void *module_region)
2307 {
2308- if (mod && mod->arch.init_unw_table &&
2309- module_region == mod->module_init) {
2310+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2311 unw_remove_unwind_table(mod->arch.init_unw_table);
2312 mod->arch.init_unw_table = NULL;
2313 }
2314@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2315 }
2316
2317 static inline int
2318+in_init_rx (const struct module *mod, uint64_t addr)
2319+{
2320+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2321+}
2322+
2323+static inline int
2324+in_init_rw (const struct module *mod, uint64_t addr)
2325+{
2326+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2327+}
2328+
2329+static inline int
2330 in_init (const struct module *mod, uint64_t addr)
2331 {
2332- return addr - (uint64_t) mod->module_init < mod->init_size;
2333+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2334+}
2335+
2336+static inline int
2337+in_core_rx (const struct module *mod, uint64_t addr)
2338+{
2339+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2340+}
2341+
2342+static inline int
2343+in_core_rw (const struct module *mod, uint64_t addr)
2344+{
2345+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2346 }
2347
2348 static inline int
2349 in_core (const struct module *mod, uint64_t addr)
2350 {
2351- return addr - (uint64_t) mod->module_core < mod->core_size;
2352+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2353 }
2354
2355 static inline int
2356@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2357 break;
2358
2359 case RV_BDREL:
2360- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2361+ if (in_init_rx(mod, val))
2362+ val -= (uint64_t) mod->module_init_rx;
2363+ else if (in_init_rw(mod, val))
2364+ val -= (uint64_t) mod->module_init_rw;
2365+ else if (in_core_rx(mod, val))
2366+ val -= (uint64_t) mod->module_core_rx;
2367+ else if (in_core_rw(mod, val))
2368+ val -= (uint64_t) mod->module_core_rw;
2369 break;
2370
2371 case RV_LTV:
2372@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2373 * addresses have been selected...
2374 */
2375 uint64_t gp;
2376- if (mod->core_size > MAX_LTOFF)
2377+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2378 /*
2379 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2380 * at the end of the module.
2381 */
2382- gp = mod->core_size - MAX_LTOFF / 2;
2383+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2384 else
2385- gp = mod->core_size / 2;
2386- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2387+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2388+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2389 mod->arch.gp = gp;
2390 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2391 }
2392diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2393index 609d500..7dde2a8 100644
2394--- a/arch/ia64/kernel/sys_ia64.c
2395+++ b/arch/ia64/kernel/sys_ia64.c
2396@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2397 if (REGION_NUMBER(addr) == RGN_HPAGE)
2398 addr = 0;
2399 #endif
2400+
2401+#ifdef CONFIG_PAX_RANDMMAP
2402+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2403+ addr = mm->free_area_cache;
2404+ else
2405+#endif
2406+
2407 if (!addr)
2408 addr = mm->free_area_cache;
2409
2410@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2411 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2412 /* At this point: (!vma || addr < vma->vm_end). */
2413 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2414- if (start_addr != TASK_UNMAPPED_BASE) {
2415+ if (start_addr != mm->mmap_base) {
2416 /* Start a new search --- just in case we missed some holes. */
2417- addr = TASK_UNMAPPED_BASE;
2418+ addr = mm->mmap_base;
2419 goto full_search;
2420 }
2421 return -ENOMEM;
2422 }
2423- if (!vma || addr + len <= vma->vm_start) {
2424+ if (check_heap_stack_gap(vma, addr, len)) {
2425 /* Remember the address where we stopped this search: */
2426 mm->free_area_cache = addr + len;
2427 return addr;
2428diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2429index 53c0ba0..2accdde 100644
2430--- a/arch/ia64/kernel/vmlinux.lds.S
2431+++ b/arch/ia64/kernel/vmlinux.lds.S
2432@@ -199,7 +199,7 @@ SECTIONS {
2433 /* Per-cpu data: */
2434 . = ALIGN(PERCPU_PAGE_SIZE);
2435 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2436- __phys_per_cpu_start = __per_cpu_load;
2437+ __phys_per_cpu_start = per_cpu_load;
2438 /*
2439 * ensure percpu data fits
2440 * into percpu page size
2441diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2442index 20b3593..1ce77f0 100644
2443--- a/arch/ia64/mm/fault.c
2444+++ b/arch/ia64/mm/fault.c
2445@@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
2446 return pte_present(pte);
2447 }
2448
2449+#ifdef CONFIG_PAX_PAGEEXEC
2450+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2451+{
2452+ unsigned long i;
2453+
2454+ printk(KERN_ERR "PAX: bytes at PC: ");
2455+ for (i = 0; i < 8; i++) {
2456+ unsigned int c;
2457+ if (get_user(c, (unsigned int *)pc+i))
2458+ printk(KERN_CONT "???????? ");
2459+ else
2460+ printk(KERN_CONT "%08x ", c);
2461+ }
2462+ printk("\n");
2463+}
2464+#endif
2465+
2466 void __kprobes
2467 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2468 {
2469@@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2470 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2471 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2472
2473- if ((vma->vm_flags & mask) != mask)
2474+ if ((vma->vm_flags & mask) != mask) {
2475+
2476+#ifdef CONFIG_PAX_PAGEEXEC
2477+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2478+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2479+ goto bad_area;
2480+
2481+ up_read(&mm->mmap_sem);
2482+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2483+ do_group_exit(SIGKILL);
2484+ }
2485+#endif
2486+
2487 goto bad_area;
2488
2489+ }
2490+
2491 /*
2492 * If for any reason at all we couldn't handle the fault, make
2493 * sure we exit gracefully rather than endlessly redo the
2494diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2495index 5ca674b..e0e1b70 100644
2496--- a/arch/ia64/mm/hugetlbpage.c
2497+++ b/arch/ia64/mm/hugetlbpage.c
2498@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2499 /* At this point: (!vmm || addr < vmm->vm_end). */
2500 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2501 return -ENOMEM;
2502- if (!vmm || (addr + len) <= vmm->vm_start)
2503+ if (check_heap_stack_gap(vmm, addr, len))
2504 return addr;
2505 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2506 }
2507diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2508index 13df239d..cb52116 100644
2509--- a/arch/ia64/mm/init.c
2510+++ b/arch/ia64/mm/init.c
2511@@ -121,6 +121,19 @@ ia64_init_addr_space (void)
2512 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2513 vma->vm_end = vma->vm_start + PAGE_SIZE;
2514 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2515+
2516+#ifdef CONFIG_PAX_PAGEEXEC
2517+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2518+ vma->vm_flags &= ~VM_EXEC;
2519+
2520+#ifdef CONFIG_PAX_MPROTECT
2521+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
2522+ vma->vm_flags &= ~VM_MAYEXEC;
2523+#endif
2524+
2525+ }
2526+#endif
2527+
2528 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2529 down_write(&current->mm->mmap_sem);
2530 if (insert_vm_struct(current->mm, vma)) {
2531diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2532index 40b3ee9..8c2c112 100644
2533--- a/arch/m32r/include/asm/cache.h
2534+++ b/arch/m32r/include/asm/cache.h
2535@@ -1,8 +1,10 @@
2536 #ifndef _ASM_M32R_CACHE_H
2537 #define _ASM_M32R_CACHE_H
2538
2539+#include <linux/const.h>
2540+
2541 /* L1 cache line size */
2542 #define L1_CACHE_SHIFT 4
2543-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2544+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2545
2546 #endif /* _ASM_M32R_CACHE_H */
2547diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2548index 82abd15..d95ae5d 100644
2549--- a/arch/m32r/lib/usercopy.c
2550+++ b/arch/m32r/lib/usercopy.c
2551@@ -14,6 +14,9 @@
2552 unsigned long
2553 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2554 {
2555+ if ((long)n < 0)
2556+ return n;
2557+
2558 prefetch(from);
2559 if (access_ok(VERIFY_WRITE, to, n))
2560 __copy_user(to,from,n);
2561@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2562 unsigned long
2563 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2564 {
2565+ if ((long)n < 0)
2566+ return n;
2567+
2568 prefetchw(to);
2569 if (access_ok(VERIFY_READ, from, n))
2570 __copy_user_zeroing(to,from,n);
2571diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2572index 0395c51..5f26031 100644
2573--- a/arch/m68k/include/asm/cache.h
2574+++ b/arch/m68k/include/asm/cache.h
2575@@ -4,9 +4,11 @@
2576 #ifndef __ARCH_M68K_CACHE_H
2577 #define __ARCH_M68K_CACHE_H
2578
2579+#include <linux/const.h>
2580+
2581 /* bytes per L1 cache line */
2582 #define L1_CACHE_SHIFT 4
2583-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2584+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2585
2586 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2587
2588diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2589index 4efe96a..60e8699 100644
2590--- a/arch/microblaze/include/asm/cache.h
2591+++ b/arch/microblaze/include/asm/cache.h
2592@@ -13,11 +13,12 @@
2593 #ifndef _ASM_MICROBLAZE_CACHE_H
2594 #define _ASM_MICROBLAZE_CACHE_H
2595
2596+#include <linux/const.h>
2597 #include <asm/registers.h>
2598
2599 #define L1_CACHE_SHIFT 5
2600 /* word-granular cache in microblaze */
2601-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2602+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2603
2604 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2605
2606diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2607index 1d93f81..67794d0 100644
2608--- a/arch/mips/include/asm/atomic.h
2609+++ b/arch/mips/include/asm/atomic.h
2610@@ -21,6 +21,10 @@
2611 #include <asm/war.h>
2612 #include <asm/system.h>
2613
2614+#ifdef CONFIG_GENERIC_ATOMIC64
2615+#include <asm-generic/atomic64.h>
2616+#endif
2617+
2618 #define ATOMIC_INIT(i) { (i) }
2619
2620 /*
2621@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2622 */
2623 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2624
2625+#define atomic64_read_unchecked(v) atomic64_read(v)
2626+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2627+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2628+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2629+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2630+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2631+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2632+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2633+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2634+
2635 #endif /* CONFIG_64BIT */
2636
2637 /*
2638diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2639index b4db69f..8f3b093 100644
2640--- a/arch/mips/include/asm/cache.h
2641+++ b/arch/mips/include/asm/cache.h
2642@@ -9,10 +9,11 @@
2643 #ifndef _ASM_CACHE_H
2644 #define _ASM_CACHE_H
2645
2646+#include <linux/const.h>
2647 #include <kmalloc.h>
2648
2649 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2650-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2651+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2652
2653 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2654 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2655diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2656index 455c0ac..ad65fbe 100644
2657--- a/arch/mips/include/asm/elf.h
2658+++ b/arch/mips/include/asm/elf.h
2659@@ -372,13 +372,16 @@ extern const char *__elf_platform;
2660 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2661 #endif
2662
2663+#ifdef CONFIG_PAX_ASLR
2664+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2665+
2666+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2667+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2668+#endif
2669+
2670 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2671 struct linux_binprm;
2672 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2673 int uses_interp);
2674
2675-struct mm_struct;
2676-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2677-#define arch_randomize_brk arch_randomize_brk
2678-
2679 #endif /* _ASM_ELF_H */
2680diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2681index da9bd7d..91aa7ab 100644
2682--- a/arch/mips/include/asm/page.h
2683+++ b/arch/mips/include/asm/page.h
2684@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2685 #ifdef CONFIG_CPU_MIPS32
2686 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2687 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2688- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2689+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2690 #else
2691 typedef struct { unsigned long long pte; } pte_t;
2692 #define pte_val(x) ((x).pte)
2693diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2694index 6018c80..7c37203 100644
2695--- a/arch/mips/include/asm/system.h
2696+++ b/arch/mips/include/asm/system.h
2697@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2698 */
2699 #define __ARCH_WANT_UNLOCKED_CTXSW
2700
2701-extern unsigned long arch_align_stack(unsigned long sp);
2702+#define arch_align_stack(x) ((x) & ~0xfUL)
2703
2704 #endif /* _ASM_SYSTEM_H */
2705diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2706index 9fdd8bc..4bd7f1a 100644
2707--- a/arch/mips/kernel/binfmt_elfn32.c
2708+++ b/arch/mips/kernel/binfmt_elfn32.c
2709@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2710 #undef ELF_ET_DYN_BASE
2711 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2712
2713+#ifdef CONFIG_PAX_ASLR
2714+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2715+
2716+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2717+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2718+#endif
2719+
2720 #include <asm/processor.h>
2721 #include <linux/module.h>
2722 #include <linux/elfcore.h>
2723diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2724index ff44823..97f8906 100644
2725--- a/arch/mips/kernel/binfmt_elfo32.c
2726+++ b/arch/mips/kernel/binfmt_elfo32.c
2727@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2728 #undef ELF_ET_DYN_BASE
2729 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2730
2731+#ifdef CONFIG_PAX_ASLR
2732+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2733+
2734+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2735+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2736+#endif
2737+
2738 #include <asm/processor.h>
2739
2740 /*
2741diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2742index 7955409..ceaea7c 100644
2743--- a/arch/mips/kernel/process.c
2744+++ b/arch/mips/kernel/process.c
2745@@ -483,15 +483,3 @@ unsigned long get_wchan(struct task_struct *task)
2746 out:
2747 return pc;
2748 }
2749-
2750-/*
2751- * Don't forget that the stack pointer must be aligned on a 8 bytes
2752- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2753- */
2754-unsigned long arch_align_stack(unsigned long sp)
2755-{
2756- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2757- sp -= get_random_int() & ~PAGE_MASK;
2758-
2759- return sp & ALMASK;
2760-}
2761diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2762index 69ebd58..e4bff83 100644
2763--- a/arch/mips/mm/fault.c
2764+++ b/arch/mips/mm/fault.c
2765@@ -28,6 +28,23 @@
2766 #include <asm/highmem.h> /* For VMALLOC_END */
2767 #include <linux/kdebug.h>
2768
2769+#ifdef CONFIG_PAX_PAGEEXEC
2770+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2771+{
2772+ unsigned long i;
2773+
2774+ printk(KERN_ERR "PAX: bytes at PC: ");
2775+ for (i = 0; i < 5; i++) {
2776+ unsigned int c;
2777+ if (get_user(c, (unsigned int *)pc+i))
2778+ printk(KERN_CONT "???????? ");
2779+ else
2780+ printk(KERN_CONT "%08x ", c);
2781+ }
2782+ printk("\n");
2783+}
2784+#endif
2785+
2786 /*
2787 * This routine handles page faults. It determines the address,
2788 * and the problem, and then passes it off to one of the appropriate
2789diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
2790index 302d779..7d35bf8 100644
2791--- a/arch/mips/mm/mmap.c
2792+++ b/arch/mips/mm/mmap.c
2793@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2794 do_color_align = 1;
2795
2796 /* requesting a specific address */
2797+
2798+#ifdef CONFIG_PAX_RANDMMAP
2799+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2800+#endif
2801+
2802 if (addr) {
2803 if (do_color_align)
2804 addr = COLOUR_ALIGN(addr, pgoff);
2805@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2806 addr = PAGE_ALIGN(addr);
2807
2808 vma = find_vma(mm, addr);
2809- if (TASK_SIZE - len >= addr &&
2810- (!vma || addr + len <= vma->vm_start))
2811+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
2812 return addr;
2813 }
2814
2815@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2816 /* At this point: (!vma || addr < vma->vm_end). */
2817 if (TASK_SIZE - len < addr)
2818 return -ENOMEM;
2819- if (!vma || addr + len <= vma->vm_start)
2820+ if (check_heap_stack_gap(vmm, addr, len))
2821 return addr;
2822 addr = vma->vm_end;
2823 if (do_color_align)
2824@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2825 /* make sure it can fit in the remaining address space */
2826 if (likely(addr > len)) {
2827 vma = find_vma(mm, addr - len);
2828- if (!vma || addr <= vma->vm_start) {
2829+ if (check_heap_stack_gap(vmm, addr - len, len))
2830 /* cache the address as a hint for next time */
2831 return mm->free_area_cache = addr - len;
2832 }
2833@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2834 * return with success:
2835 */
2836 vma = find_vma(mm, addr);
2837- if (likely(!vma || addr + len <= vma->vm_start)) {
2838+ if (check_heap_stack_gap(vmm, addr, len)) {
2839 /* cache the address as a hint for next time */
2840 return mm->free_area_cache = addr;
2841 }
2842@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2843 mm->unmap_area = arch_unmap_area_topdown;
2844 }
2845 }
2846-
2847-static inline unsigned long brk_rnd(void)
2848-{
2849- unsigned long rnd = get_random_int();
2850-
2851- rnd = rnd << PAGE_SHIFT;
2852- /* 8MB for 32bit, 256MB for 64bit */
2853- if (TASK_IS_32BIT_ADDR)
2854- rnd = rnd & 0x7ffffful;
2855- else
2856- rnd = rnd & 0xffffffful;
2857-
2858- return rnd;
2859-}
2860-
2861-unsigned long arch_randomize_brk(struct mm_struct *mm)
2862-{
2863- unsigned long base = mm->brk;
2864- unsigned long ret;
2865-
2866- ret = PAGE_ALIGN(base + brk_rnd());
2867-
2868- if (ret < mm->brk)
2869- return mm->brk;
2870-
2871- return ret;
2872-}
2873diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2874index 967d144..db12197 100644
2875--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
2876+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2877@@ -11,12 +11,14 @@
2878 #ifndef _ASM_PROC_CACHE_H
2879 #define _ASM_PROC_CACHE_H
2880
2881+#include <linux/const.h>
2882+
2883 /* L1 cache */
2884
2885 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2886 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
2887-#define L1_CACHE_BYTES 16 /* bytes per entry */
2888 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
2889+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2890 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
2891
2892 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2893diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2894index bcb5df2..84fabd2 100644
2895--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2896+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2897@@ -16,13 +16,15 @@
2898 #ifndef _ASM_PROC_CACHE_H
2899 #define _ASM_PROC_CACHE_H
2900
2901+#include <linux/const.h>
2902+
2903 /*
2904 * L1 cache
2905 */
2906 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2907 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
2908-#define L1_CACHE_BYTES 32 /* bytes per entry */
2909 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
2910+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2911 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
2912
2913 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2914diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
2915index 4ce7a01..449202a 100644
2916--- a/arch/openrisc/include/asm/cache.h
2917+++ b/arch/openrisc/include/asm/cache.h
2918@@ -19,11 +19,13 @@
2919 #ifndef __ASM_OPENRISC_CACHE_H
2920 #define __ASM_OPENRISC_CACHE_H
2921
2922+#include <linux/const.h>
2923+
2924 /* FIXME: How can we replace these with values from the CPU...
2925 * they shouldn't be hard-coded!
2926 */
2927
2928-#define L1_CACHE_BYTES 16
2929 #define L1_CACHE_SHIFT 4
2930+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2931
2932 #endif /* __ASM_OPENRISC_CACHE_H */
2933diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
2934index 4054b31..a10c105 100644
2935--- a/arch/parisc/include/asm/atomic.h
2936+++ b/arch/parisc/include/asm/atomic.h
2937@@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2938
2939 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2940
2941+#define atomic64_read_unchecked(v) atomic64_read(v)
2942+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2943+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2944+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2945+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2946+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2947+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2948+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2949+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2950+
2951 #endif /* !CONFIG_64BIT */
2952
2953
2954diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
2955index 47f11c7..3420df2 100644
2956--- a/arch/parisc/include/asm/cache.h
2957+++ b/arch/parisc/include/asm/cache.h
2958@@ -5,6 +5,7 @@
2959 #ifndef __ARCH_PARISC_CACHE_H
2960 #define __ARCH_PARISC_CACHE_H
2961
2962+#include <linux/const.h>
2963
2964 /*
2965 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
2966@@ -15,13 +16,13 @@
2967 * just ruin performance.
2968 */
2969 #ifdef CONFIG_PA20
2970-#define L1_CACHE_BYTES 64
2971 #define L1_CACHE_SHIFT 6
2972 #else
2973-#define L1_CACHE_BYTES 32
2974 #define L1_CACHE_SHIFT 5
2975 #endif
2976
2977+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2978+
2979 #ifndef __ASSEMBLY__
2980
2981 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2982diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2983index 19f6cb1..6c78cf2 100644
2984--- a/arch/parisc/include/asm/elf.h
2985+++ b/arch/parisc/include/asm/elf.h
2986@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
2987
2988 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2989
2990+#ifdef CONFIG_PAX_ASLR
2991+#define PAX_ELF_ET_DYN_BASE 0x10000UL
2992+
2993+#define PAX_DELTA_MMAP_LEN 16
2994+#define PAX_DELTA_STACK_LEN 16
2995+#endif
2996+
2997 /* This yields a mask that user programs can use to figure out what
2998 instruction set this CPU supports. This could be done in user space,
2999 but it's not easy, and we've already done it here. */
3000diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3001index 22dadeb..f6c2be4 100644
3002--- a/arch/parisc/include/asm/pgtable.h
3003+++ b/arch/parisc/include/asm/pgtable.h
3004@@ -210,6 +210,17 @@ struct vm_area_struct;
3005 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3006 #define PAGE_COPY PAGE_EXECREAD
3007 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3008+
3009+#ifdef CONFIG_PAX_PAGEEXEC
3010+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3011+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3012+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3013+#else
3014+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3015+# define PAGE_COPY_NOEXEC PAGE_COPY
3016+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3017+#endif
3018+
3019 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3020 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3021 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3022diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3023index 5e34ccf..672bc9c 100644
3024--- a/arch/parisc/kernel/module.c
3025+++ b/arch/parisc/kernel/module.c
3026@@ -98,16 +98,38 @@
3027
3028 /* three functions to determine where in the module core
3029 * or init pieces the location is */
3030+static inline int in_init_rx(struct module *me, void *loc)
3031+{
3032+ return (loc >= me->module_init_rx &&
3033+ loc < (me->module_init_rx + me->init_size_rx));
3034+}
3035+
3036+static inline int in_init_rw(struct module *me, void *loc)
3037+{
3038+ return (loc >= me->module_init_rw &&
3039+ loc < (me->module_init_rw + me->init_size_rw));
3040+}
3041+
3042 static inline int in_init(struct module *me, void *loc)
3043 {
3044- return (loc >= me->module_init &&
3045- loc <= (me->module_init + me->init_size));
3046+ return in_init_rx(me, loc) || in_init_rw(me, loc);
3047+}
3048+
3049+static inline int in_core_rx(struct module *me, void *loc)
3050+{
3051+ return (loc >= me->module_core_rx &&
3052+ loc < (me->module_core_rx + me->core_size_rx));
3053+}
3054+
3055+static inline int in_core_rw(struct module *me, void *loc)
3056+{
3057+ return (loc >= me->module_core_rw &&
3058+ loc < (me->module_core_rw + me->core_size_rw));
3059 }
3060
3061 static inline int in_core(struct module *me, void *loc)
3062 {
3063- return (loc >= me->module_core &&
3064- loc <= (me->module_core + me->core_size));
3065+ return in_core_rx(me, loc) || in_core_rw(me, loc);
3066 }
3067
3068 static inline int in_local(struct module *me, void *loc)
3069@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3070 }
3071
3072 /* align things a bit */
3073- me->core_size = ALIGN(me->core_size, 16);
3074- me->arch.got_offset = me->core_size;
3075- me->core_size += gots * sizeof(struct got_entry);
3076+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3077+ me->arch.got_offset = me->core_size_rw;
3078+ me->core_size_rw += gots * sizeof(struct got_entry);
3079
3080- me->core_size = ALIGN(me->core_size, 16);
3081- me->arch.fdesc_offset = me->core_size;
3082- me->core_size += fdescs * sizeof(Elf_Fdesc);
3083+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3084+ me->arch.fdesc_offset = me->core_size_rw;
3085+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3086
3087 me->arch.got_max = gots;
3088 me->arch.fdesc_max = fdescs;
3089@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3090
3091 BUG_ON(value == 0);
3092
3093- got = me->module_core + me->arch.got_offset;
3094+ got = me->module_core_rw + me->arch.got_offset;
3095 for (i = 0; got[i].addr; i++)
3096 if (got[i].addr == value)
3097 goto out;
3098@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3099 #ifdef CONFIG_64BIT
3100 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3101 {
3102- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3103+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3104
3105 if (!value) {
3106 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3107@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3108
3109 /* Create new one */
3110 fdesc->addr = value;
3111- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3112+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3113 return (Elf_Addr)fdesc;
3114 }
3115 #endif /* CONFIG_64BIT */
3116@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3117
3118 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3119 end = table + sechdrs[me->arch.unwind_section].sh_size;
3120- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3121+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3122
3123 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3124 me->arch.unwind_section, table, end, gp);
3125diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3126index c9b9322..02d8940 100644
3127--- a/arch/parisc/kernel/sys_parisc.c
3128+++ b/arch/parisc/kernel/sys_parisc.c
3129@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3130 /* At this point: (!vma || addr < vma->vm_end). */
3131 if (TASK_SIZE - len < addr)
3132 return -ENOMEM;
3133- if (!vma || addr + len <= vma->vm_start)
3134+ if (check_heap_stack_gap(vma, addr, len))
3135 return addr;
3136 addr = vma->vm_end;
3137 }
3138@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3139 /* At this point: (!vma || addr < vma->vm_end). */
3140 if (TASK_SIZE - len < addr)
3141 return -ENOMEM;
3142- if (!vma || addr + len <= vma->vm_start)
3143+ if (check_heap_stack_gap(vma, addr, len))
3144 return addr;
3145 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3146 if (addr < vma->vm_end) /* handle wraparound */
3147@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3148 if (flags & MAP_FIXED)
3149 return addr;
3150 if (!addr)
3151- addr = TASK_UNMAPPED_BASE;
3152+ addr = current->mm->mmap_base;
3153
3154 if (filp) {
3155 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3156diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3157index f19e660..414fe24 100644
3158--- a/arch/parisc/kernel/traps.c
3159+++ b/arch/parisc/kernel/traps.c
3160@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3161
3162 down_read(&current->mm->mmap_sem);
3163 vma = find_vma(current->mm,regs->iaoq[0]);
3164- if (vma && (regs->iaoq[0] >= vma->vm_start)
3165- && (vma->vm_flags & VM_EXEC)) {
3166-
3167+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3168 fault_address = regs->iaoq[0];
3169 fault_space = regs->iasq[0];
3170
3171diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3172index 18162ce..94de376 100644
3173--- a/arch/parisc/mm/fault.c
3174+++ b/arch/parisc/mm/fault.c
3175@@ -15,6 +15,7 @@
3176 #include <linux/sched.h>
3177 #include <linux/interrupt.h>
3178 #include <linux/module.h>
3179+#include <linux/unistd.h>
3180
3181 #include <asm/uaccess.h>
3182 #include <asm/traps.h>
3183@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3184 static unsigned long
3185 parisc_acctyp(unsigned long code, unsigned int inst)
3186 {
3187- if (code == 6 || code == 16)
3188+ if (code == 6 || code == 7 || code == 16)
3189 return VM_EXEC;
3190
3191 switch (inst & 0xf0000000) {
3192@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3193 }
3194 #endif
3195
3196+#ifdef CONFIG_PAX_PAGEEXEC
3197+/*
3198+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3199+ *
3200+ * returns 1 when task should be killed
3201+ * 2 when rt_sigreturn trampoline was detected
3202+ * 3 when unpatched PLT trampoline was detected
3203+ */
3204+static int pax_handle_fetch_fault(struct pt_regs *regs)
3205+{
3206+
3207+#ifdef CONFIG_PAX_EMUPLT
3208+ int err;
3209+
3210+ do { /* PaX: unpatched PLT emulation */
3211+ unsigned int bl, depwi;
3212+
3213+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3214+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3215+
3216+ if (err)
3217+ break;
3218+
3219+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3220+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3221+
3222+ err = get_user(ldw, (unsigned int *)addr);
3223+ err |= get_user(bv, (unsigned int *)(addr+4));
3224+ err |= get_user(ldw2, (unsigned int *)(addr+8));
3225+
3226+ if (err)
3227+ break;
3228+
3229+ if (ldw == 0x0E801096U &&
3230+ bv == 0xEAC0C000U &&
3231+ ldw2 == 0x0E881095U)
3232+ {
3233+ unsigned int resolver, map;
3234+
3235+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3236+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3237+ if (err)
3238+ break;
3239+
3240+ regs->gr[20] = instruction_pointer(regs)+8;
3241+ regs->gr[21] = map;
3242+ regs->gr[22] = resolver;
3243+ regs->iaoq[0] = resolver | 3UL;
3244+ regs->iaoq[1] = regs->iaoq[0] + 4;
3245+ return 3;
3246+ }
3247+ }
3248+ } while (0);
3249+#endif
3250+
3251+#ifdef CONFIG_PAX_EMUTRAMP
3252+
3253+#ifndef CONFIG_PAX_EMUSIGRT
3254+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3255+ return 1;
3256+#endif
3257+
3258+ do { /* PaX: rt_sigreturn emulation */
3259+ unsigned int ldi1, ldi2, bel, nop;
3260+
3261+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3262+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3263+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3264+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3265+
3266+ if (err)
3267+ break;
3268+
3269+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3270+ ldi2 == 0x3414015AU &&
3271+ bel == 0xE4008200U &&
3272+ nop == 0x08000240U)
3273+ {
3274+ regs->gr[25] = (ldi1 & 2) >> 1;
3275+ regs->gr[20] = __NR_rt_sigreturn;
3276+ regs->gr[31] = regs->iaoq[1] + 16;
3277+ regs->sr[0] = regs->iasq[1];
3278+ regs->iaoq[0] = 0x100UL;
3279+ regs->iaoq[1] = regs->iaoq[0] + 4;
3280+ regs->iasq[0] = regs->sr[2];
3281+ regs->iasq[1] = regs->sr[2];
3282+ return 2;
3283+ }
3284+ } while (0);
3285+#endif
3286+
3287+ return 1;
3288+}
3289+
3290+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3291+{
3292+ unsigned long i;
3293+
3294+ printk(KERN_ERR "PAX: bytes at PC: ");
3295+ for (i = 0; i < 5; i++) {
3296+ unsigned int c;
3297+ if (get_user(c, (unsigned int *)pc+i))
3298+ printk(KERN_CONT "???????? ");
3299+ else
3300+ printk(KERN_CONT "%08x ", c);
3301+ }
3302+ printk("\n");
3303+}
3304+#endif
3305+
3306 int fixup_exception(struct pt_regs *regs)
3307 {
3308 const struct exception_table_entry *fix;
3309@@ -192,8 +303,33 @@ good_area:
3310
3311 acc_type = parisc_acctyp(code,regs->iir);
3312
3313- if ((vma->vm_flags & acc_type) != acc_type)
3314+ if ((vma->vm_flags & acc_type) != acc_type) {
3315+
3316+#ifdef CONFIG_PAX_PAGEEXEC
3317+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3318+ (address & ~3UL) == instruction_pointer(regs))
3319+ {
3320+ up_read(&mm->mmap_sem);
3321+ switch (pax_handle_fetch_fault(regs)) {
3322+
3323+#ifdef CONFIG_PAX_EMUPLT
3324+ case 3:
3325+ return;
3326+#endif
3327+
3328+#ifdef CONFIG_PAX_EMUTRAMP
3329+ case 2:
3330+ return;
3331+#endif
3332+
3333+ }
3334+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3335+ do_group_exit(SIGKILL);
3336+ }
3337+#endif
3338+
3339 goto bad_area;
3340+ }
3341
3342 /*
3343 * If for any reason at all we couldn't handle the fault, make
3344diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3345index 02e41b5..ec6e26c 100644
3346--- a/arch/powerpc/include/asm/atomic.h
3347+++ b/arch/powerpc/include/asm/atomic.h
3348@@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3349
3350 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3351
3352+#define atomic64_read_unchecked(v) atomic64_read(v)
3353+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3354+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3355+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3356+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3357+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3358+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3359+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3360+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3361+
3362 #endif /* __powerpc64__ */
3363
3364 #endif /* __KERNEL__ */
3365diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3366index 4b50941..5605819 100644
3367--- a/arch/powerpc/include/asm/cache.h
3368+++ b/arch/powerpc/include/asm/cache.h
3369@@ -3,6 +3,7 @@
3370
3371 #ifdef __KERNEL__
3372
3373+#include <linux/const.h>
3374
3375 /* bytes per L1 cache line */
3376 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3377@@ -22,7 +23,7 @@
3378 #define L1_CACHE_SHIFT 7
3379 #endif
3380
3381-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3382+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3383
3384 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3385
3386diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3387index 3bf9cca..e7457d0 100644
3388--- a/arch/powerpc/include/asm/elf.h
3389+++ b/arch/powerpc/include/asm/elf.h
3390@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3391 the loader. We need to make sure that it is out of the way of the program
3392 that it will "exec", and that there is sufficient room for the brk. */
3393
3394-extern unsigned long randomize_et_dyn(unsigned long base);
3395-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3396+#define ELF_ET_DYN_BASE (0x20000000)
3397+
3398+#ifdef CONFIG_PAX_ASLR
3399+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3400+
3401+#ifdef __powerpc64__
3402+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3403+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3404+#else
3405+#define PAX_DELTA_MMAP_LEN 15
3406+#define PAX_DELTA_STACK_LEN 15
3407+#endif
3408+#endif
3409
3410 /*
3411 * Our registers are always unsigned longs, whether we're a 32 bit
3412@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3413 (0x7ff >> (PAGE_SHIFT - 12)) : \
3414 (0x3ffff >> (PAGE_SHIFT - 12)))
3415
3416-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3417-#define arch_randomize_brk arch_randomize_brk
3418-
3419 #endif /* __KERNEL__ */
3420
3421 /*
3422diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3423index bca8fdc..61e9580 100644
3424--- a/arch/powerpc/include/asm/kmap_types.h
3425+++ b/arch/powerpc/include/asm/kmap_types.h
3426@@ -27,6 +27,7 @@ enum km_type {
3427 KM_PPC_SYNC_PAGE,
3428 KM_PPC_SYNC_ICACHE,
3429 KM_KDB,
3430+ KM_CLEARPAGE,
3431 KM_TYPE_NR
3432 };
3433
3434diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3435index d4a7f64..451de1c 100644
3436--- a/arch/powerpc/include/asm/mman.h
3437+++ b/arch/powerpc/include/asm/mman.h
3438@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3439 }
3440 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3441
3442-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3443+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3444 {
3445 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3446 }
3447diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3448index f072e97..b436dee 100644
3449--- a/arch/powerpc/include/asm/page.h
3450+++ b/arch/powerpc/include/asm/page.h
3451@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3452 * and needs to be executable. This means the whole heap ends
3453 * up being executable.
3454 */
3455-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3456- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3457+#define VM_DATA_DEFAULT_FLAGS32 \
3458+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3459+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3460
3461 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3462 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3463@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3464 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3465 #endif
3466
3467+#define ktla_ktva(addr) (addr)
3468+#define ktva_ktla(addr) (addr)
3469+
3470 /*
3471 * Use the top bit of the higher-level page table entries to indicate whether
3472 * the entries we point to contain hugepages. This works because we know that
3473diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3474index fed85e6..da5c71b 100644
3475--- a/arch/powerpc/include/asm/page_64.h
3476+++ b/arch/powerpc/include/asm/page_64.h
3477@@ -146,15 +146,18 @@ do { \
3478 * stack by default, so in the absence of a PT_GNU_STACK program header
3479 * we turn execute permission off.
3480 */
3481-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3482- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3483+#define VM_STACK_DEFAULT_FLAGS32 \
3484+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3485+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3486
3487 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3488 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3489
3490+#ifndef CONFIG_PAX_PAGEEXEC
3491 #define VM_STACK_DEFAULT_FLAGS \
3492 (is_32bit_task() ? \
3493 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3494+#endif
3495
3496 #include <asm-generic/getorder.h>
3497
3498diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3499index 2e0e411..7899c68 100644
3500--- a/arch/powerpc/include/asm/pgtable.h
3501+++ b/arch/powerpc/include/asm/pgtable.h
3502@@ -2,6 +2,7 @@
3503 #define _ASM_POWERPC_PGTABLE_H
3504 #ifdef __KERNEL__
3505
3506+#include <linux/const.h>
3507 #ifndef __ASSEMBLY__
3508 #include <asm/processor.h> /* For TASK_SIZE */
3509 #include <asm/mmu.h>
3510diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3511index 4aad413..85d86bf 100644
3512--- a/arch/powerpc/include/asm/pte-hash32.h
3513+++ b/arch/powerpc/include/asm/pte-hash32.h
3514@@ -21,6 +21,7 @@
3515 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3516 #define _PAGE_USER 0x004 /* usermode access allowed */
3517 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3518+#define _PAGE_EXEC _PAGE_GUARDED
3519 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3520 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3521 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3522diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3523index 7fdc2c0..e47a9b02d3 100644
3524--- a/arch/powerpc/include/asm/reg.h
3525+++ b/arch/powerpc/include/asm/reg.h
3526@@ -212,6 +212,7 @@
3527 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3528 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3529 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3530+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3531 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3532 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3533 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3534diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3535index c377457..3c69fbc 100644
3536--- a/arch/powerpc/include/asm/system.h
3537+++ b/arch/powerpc/include/asm/system.h
3538@@ -539,7 +539,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3539 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3540 #endif
3541
3542-extern unsigned long arch_align_stack(unsigned long sp);
3543+#define arch_align_stack(x) ((x) & ~0xfUL)
3544
3545 /* Used in very early kernel initialization. */
3546 extern unsigned long reloc_offset(void);
3547diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3548index bd0fb84..a42a14b 100644
3549--- a/arch/powerpc/include/asm/uaccess.h
3550+++ b/arch/powerpc/include/asm/uaccess.h
3551@@ -13,6 +13,8 @@
3552 #define VERIFY_READ 0
3553 #define VERIFY_WRITE 1
3554
3555+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3556+
3557 /*
3558 * The fs value determines whether argument validity checking should be
3559 * performed or not. If get_fs() == USER_DS, checking is performed, with
3560@@ -327,52 +329,6 @@ do { \
3561 extern unsigned long __copy_tofrom_user(void __user *to,
3562 const void __user *from, unsigned long size);
3563
3564-#ifndef __powerpc64__
3565-
3566-static inline unsigned long copy_from_user(void *to,
3567- const void __user *from, unsigned long n)
3568-{
3569- unsigned long over;
3570-
3571- if (access_ok(VERIFY_READ, from, n))
3572- return __copy_tofrom_user((__force void __user *)to, from, n);
3573- if ((unsigned long)from < TASK_SIZE) {
3574- over = (unsigned long)from + n - TASK_SIZE;
3575- return __copy_tofrom_user((__force void __user *)to, from,
3576- n - over) + over;
3577- }
3578- return n;
3579-}
3580-
3581-static inline unsigned long copy_to_user(void __user *to,
3582- const void *from, unsigned long n)
3583-{
3584- unsigned long over;
3585-
3586- if (access_ok(VERIFY_WRITE, to, n))
3587- return __copy_tofrom_user(to, (__force void __user *)from, n);
3588- if ((unsigned long)to < TASK_SIZE) {
3589- over = (unsigned long)to + n - TASK_SIZE;
3590- return __copy_tofrom_user(to, (__force void __user *)from,
3591- n - over) + over;
3592- }
3593- return n;
3594-}
3595-
3596-#else /* __powerpc64__ */
3597-
3598-#define __copy_in_user(to, from, size) \
3599- __copy_tofrom_user((to), (from), (size))
3600-
3601-extern unsigned long copy_from_user(void *to, const void __user *from,
3602- unsigned long n);
3603-extern unsigned long copy_to_user(void __user *to, const void *from,
3604- unsigned long n);
3605-extern unsigned long copy_in_user(void __user *to, const void __user *from,
3606- unsigned long n);
3607-
3608-#endif /* __powerpc64__ */
3609-
3610 static inline unsigned long __copy_from_user_inatomic(void *to,
3611 const void __user *from, unsigned long n)
3612 {
3613@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3614 if (ret == 0)
3615 return 0;
3616 }
3617+
3618+ if (!__builtin_constant_p(n))
3619+ check_object_size(to, n, false);
3620+
3621 return __copy_tofrom_user((__force void __user *)to, from, n);
3622 }
3623
3624@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3625 if (ret == 0)
3626 return 0;
3627 }
3628+
3629+ if (!__builtin_constant_p(n))
3630+ check_object_size(from, n, true);
3631+
3632 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3633 }
3634
3635@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3636 return __copy_to_user_inatomic(to, from, size);
3637 }
3638
3639+#ifndef __powerpc64__
3640+
3641+static inline unsigned long __must_check copy_from_user(void *to,
3642+ const void __user *from, unsigned long n)
3643+{
3644+ unsigned long over;
3645+
3646+ if ((long)n < 0)
3647+ return n;
3648+
3649+ if (access_ok(VERIFY_READ, from, n)) {
3650+ if (!__builtin_constant_p(n))
3651+ check_object_size(to, n, false);
3652+ return __copy_tofrom_user((__force void __user *)to, from, n);
3653+ }
3654+ if ((unsigned long)from < TASK_SIZE) {
3655+ over = (unsigned long)from + n - TASK_SIZE;
3656+ if (!__builtin_constant_p(n - over))
3657+ check_object_size(to, n - over, false);
3658+ return __copy_tofrom_user((__force void __user *)to, from,
3659+ n - over) + over;
3660+ }
3661+ return n;
3662+}
3663+
3664+static inline unsigned long __must_check copy_to_user(void __user *to,
3665+ const void *from, unsigned long n)
3666+{
3667+ unsigned long over;
3668+
3669+ if ((long)n < 0)
3670+ return n;
3671+
3672+ if (access_ok(VERIFY_WRITE, to, n)) {
3673+ if (!__builtin_constant_p(n))
3674+ check_object_size(from, n, true);
3675+ return __copy_tofrom_user(to, (__force void __user *)from, n);
3676+ }
3677+ if ((unsigned long)to < TASK_SIZE) {
3678+ over = (unsigned long)to + n - TASK_SIZE;
3679+ if (!__builtin_constant_p(n))
3680+ check_object_size(from, n - over, true);
3681+ return __copy_tofrom_user(to, (__force void __user *)from,
3682+ n - over) + over;
3683+ }
3684+ return n;
3685+}
3686+
3687+#else /* __powerpc64__ */
3688+
3689+#define __copy_in_user(to, from, size) \
3690+ __copy_tofrom_user((to), (from), (size))
3691+
3692+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3693+{
3694+ if ((long)n < 0 || n > INT_MAX)
3695+ return n;
3696+
3697+ if (!__builtin_constant_p(n))
3698+ check_object_size(to, n, false);
3699+
3700+ if (likely(access_ok(VERIFY_READ, from, n)))
3701+ n = __copy_from_user(to, from, n);
3702+ else
3703+ memset(to, 0, n);
3704+ return n;
3705+}
3706+
3707+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3708+{
3709+ if ((long)n < 0 || n > INT_MAX)
3710+ return n;
3711+
3712+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
3713+ if (!__builtin_constant_p(n))
3714+ check_object_size(from, n, true);
3715+ n = __copy_to_user(to, from, n);
3716+ }
3717+ return n;
3718+}
3719+
3720+extern unsigned long copy_in_user(void __user *to, const void __user *from,
3721+ unsigned long n);
3722+
3723+#endif /* __powerpc64__ */
3724+
3725 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3726
3727 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3728diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3729index 429983c..7af363b 100644
3730--- a/arch/powerpc/kernel/exceptions-64e.S
3731+++ b/arch/powerpc/kernel/exceptions-64e.S
3732@@ -587,6 +587,7 @@ storage_fault_common:
3733 std r14,_DAR(r1)
3734 std r15,_DSISR(r1)
3735 addi r3,r1,STACK_FRAME_OVERHEAD
3736+ bl .save_nvgprs
3737 mr r4,r14
3738 mr r5,r15
3739 ld r14,PACA_EXGEN+EX_R14(r13)
3740@@ -596,8 +597,7 @@ storage_fault_common:
3741 cmpdi r3,0
3742 bne- 1f
3743 b .ret_from_except_lite
3744-1: bl .save_nvgprs
3745- mr r5,r3
3746+1: mr r5,r3
3747 addi r3,r1,STACK_FRAME_OVERHEAD
3748 ld r4,_DAR(r1)
3749 bl .bad_page_fault
3750diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3751index 15c5a4f..22a4000 100644
3752--- a/arch/powerpc/kernel/exceptions-64s.S
3753+++ b/arch/powerpc/kernel/exceptions-64s.S
3754@@ -1004,10 +1004,10 @@ handle_page_fault:
3755 11: ld r4,_DAR(r1)
3756 ld r5,_DSISR(r1)
3757 addi r3,r1,STACK_FRAME_OVERHEAD
3758+ bl .save_nvgprs
3759 bl .do_page_fault
3760 cmpdi r3,0
3761 beq+ 13f
3762- bl .save_nvgprs
3763 mr r5,r3
3764 addi r3,r1,STACK_FRAME_OVERHEAD
3765 lwz r4,_DAR(r1)
3766diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
3767index 01e2877..a1ba360 100644
3768--- a/arch/powerpc/kernel/irq.c
3769+++ b/arch/powerpc/kernel/irq.c
3770@@ -560,9 +560,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
3771 host->ops = ops;
3772 host->of_node = of_node_get(of_node);
3773
3774- if (host->ops->match == NULL)
3775- host->ops->match = default_irq_host_match;
3776-
3777 raw_spin_lock_irqsave(&irq_big_lock, flags);
3778
3779 /* If it's a legacy controller, check for duplicates and
3780@@ -635,7 +632,12 @@ struct irq_host *irq_find_host(struct device_node *node)
3781 */
3782 raw_spin_lock_irqsave(&irq_big_lock, flags);
3783 list_for_each_entry(h, &irq_hosts, link)
3784- if (h->ops->match(h, node)) {
3785+ if (h->ops->match) {
3786+ if (h->ops->match(h, node)) {
3787+ found = h;
3788+ break;
3789+ }
3790+ } else if (default_irq_host_match(h, node)) {
3791 found = h;
3792 break;
3793 }
3794diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3795index 0b6d796..d760ddb 100644
3796--- a/arch/powerpc/kernel/module_32.c
3797+++ b/arch/powerpc/kernel/module_32.c
3798@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3799 me->arch.core_plt_section = i;
3800 }
3801 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3802- printk("Module doesn't contain .plt or .init.plt sections.\n");
3803+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3804 return -ENOEXEC;
3805 }
3806
3807@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
3808
3809 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3810 /* Init, or core PLT? */
3811- if (location >= mod->module_core
3812- && location < mod->module_core + mod->core_size)
3813+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3814+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3815 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3816- else
3817+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3818+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3819 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3820+ else {
3821+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3822+ return ~0UL;
3823+ }
3824
3825 /* Find this entry, or if that fails, the next avail. entry */
3826 while (entry->jump[0]) {
3827diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3828index d817ab0..b23b18e 100644
3829--- a/arch/powerpc/kernel/process.c
3830+++ b/arch/powerpc/kernel/process.c
3831@@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
3832 * Lookup NIP late so we have the best change of getting the
3833 * above info out without failing
3834 */
3835- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3836- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3837+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3838+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3839 #endif
3840 show_stack(current, (unsigned long *) regs->gpr[1]);
3841 if (!user_mode(regs))
3842@@ -1181,10 +1181,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3843 newsp = stack[0];
3844 ip = stack[STACK_FRAME_LR_SAVE];
3845 if (!firstframe || ip != lr) {
3846- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3847+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3848 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3849 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3850- printk(" (%pS)",
3851+ printk(" (%pA)",
3852 (void *)current->ret_stack[curr_frame].ret);
3853 curr_frame--;
3854 }
3855@@ -1204,7 +1204,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3856 struct pt_regs *regs = (struct pt_regs *)
3857 (sp + STACK_FRAME_OVERHEAD);
3858 lr = regs->link;
3859- printk("--- Exception: %lx at %pS\n LR = %pS\n",
3860+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
3861 regs->trap, (void *)regs->nip, (void *)lr);
3862 firstframe = 1;
3863 }
3864@@ -1279,58 +1279,3 @@ void thread_info_cache_init(void)
3865 }
3866
3867 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3868-
3869-unsigned long arch_align_stack(unsigned long sp)
3870-{
3871- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3872- sp -= get_random_int() & ~PAGE_MASK;
3873- return sp & ~0xf;
3874-}
3875-
3876-static inline unsigned long brk_rnd(void)
3877-{
3878- unsigned long rnd = 0;
3879-
3880- /* 8MB for 32bit, 1GB for 64bit */
3881- if (is_32bit_task())
3882- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3883- else
3884- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3885-
3886- return rnd << PAGE_SHIFT;
3887-}
3888-
3889-unsigned long arch_randomize_brk(struct mm_struct *mm)
3890-{
3891- unsigned long base = mm->brk;
3892- unsigned long ret;
3893-
3894-#ifdef CONFIG_PPC_STD_MMU_64
3895- /*
3896- * If we are using 1TB segments and we are allowed to randomise
3897- * the heap, we can put it above 1TB so it is backed by a 1TB
3898- * segment. Otherwise the heap will be in the bottom 1TB
3899- * which always uses 256MB segments and this may result in a
3900- * performance penalty.
3901- */
3902- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3903- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3904-#endif
3905-
3906- ret = PAGE_ALIGN(base + brk_rnd());
3907-
3908- if (ret < mm->brk)
3909- return mm->brk;
3910-
3911- return ret;
3912-}
3913-
3914-unsigned long randomize_et_dyn(unsigned long base)
3915-{
3916- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3917-
3918- if (ret < base)
3919- return base;
3920-
3921- return ret;
3922-}
3923diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3924index 836a5a1..27289a3 100644
3925--- a/arch/powerpc/kernel/signal_32.c
3926+++ b/arch/powerpc/kernel/signal_32.c
3927@@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3928 /* Save user registers on the stack */
3929 frame = &rt_sf->uc.uc_mcontext;
3930 addr = frame;
3931- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3932+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3933 if (save_user_regs(regs, frame, 0, 1))
3934 goto badframe;
3935 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3936diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3937index a50b5ec..547078a 100644
3938--- a/arch/powerpc/kernel/signal_64.c
3939+++ b/arch/powerpc/kernel/signal_64.c
3940@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3941 current->thread.fpscr.val = 0;
3942
3943 /* Set up to return from userspace. */
3944- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3945+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3946 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3947 } else {
3948 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3949diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3950index c091527..5592625 100644
3951--- a/arch/powerpc/kernel/traps.c
3952+++ b/arch/powerpc/kernel/traps.c
3953@@ -131,6 +131,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
3954 return flags;
3955 }
3956
3957+extern void gr_handle_kernel_exploit(void);
3958+
3959 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
3960 int signr)
3961 {
3962@@ -178,6 +180,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
3963 panic("Fatal exception in interrupt");
3964 if (panic_on_oops)
3965 panic("Fatal exception");
3966+
3967+ gr_handle_kernel_exploit();
3968+
3969 do_exit(signr);
3970 }
3971
3972diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3973index 7d14bb6..1305601 100644
3974--- a/arch/powerpc/kernel/vdso.c
3975+++ b/arch/powerpc/kernel/vdso.c
3976@@ -35,6 +35,7 @@
3977 #include <asm/firmware.h>
3978 #include <asm/vdso.h>
3979 #include <asm/vdso_datapage.h>
3980+#include <asm/mman.h>
3981
3982 #include "setup.h"
3983
3984@@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3985 vdso_base = VDSO32_MBASE;
3986 #endif
3987
3988- current->mm->context.vdso_base = 0;
3989+ current->mm->context.vdso_base = ~0UL;
3990
3991 /* vDSO has a problem and was disabled, just don't "enable" it for the
3992 * process
3993@@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3994 vdso_base = get_unmapped_area(NULL, vdso_base,
3995 (vdso_pages << PAGE_SHIFT) +
3996 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3997- 0, 0);
3998+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
3999 if (IS_ERR_VALUE(vdso_base)) {
4000 rc = vdso_base;
4001 goto fail_mmapsem;
4002diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4003index 5eea6f3..5d10396 100644
4004--- a/arch/powerpc/lib/usercopy_64.c
4005+++ b/arch/powerpc/lib/usercopy_64.c
4006@@ -9,22 +9,6 @@
4007 #include <linux/module.h>
4008 #include <asm/uaccess.h>
4009
4010-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4011-{
4012- if (likely(access_ok(VERIFY_READ, from, n)))
4013- n = __copy_from_user(to, from, n);
4014- else
4015- memset(to, 0, n);
4016- return n;
4017-}
4018-
4019-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4020-{
4021- if (likely(access_ok(VERIFY_WRITE, to, n)))
4022- n = __copy_to_user(to, from, n);
4023- return n;
4024-}
4025-
4026 unsigned long copy_in_user(void __user *to, const void __user *from,
4027 unsigned long n)
4028 {
4029@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4030 return n;
4031 }
4032
4033-EXPORT_SYMBOL(copy_from_user);
4034-EXPORT_SYMBOL(copy_to_user);
4035 EXPORT_SYMBOL(copy_in_user);
4036
4037diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4038index 2f0d1b0..36fb5cc 100644
4039--- a/arch/powerpc/mm/fault.c
4040+++ b/arch/powerpc/mm/fault.c
4041@@ -32,6 +32,10 @@
4042 #include <linux/perf_event.h>
4043 #include <linux/magic.h>
4044 #include <linux/ratelimit.h>
4045+#include <linux/slab.h>
4046+#include <linux/pagemap.h>
4047+#include <linux/compiler.h>
4048+#include <linux/unistd.h>
4049
4050 #include <asm/firmware.h>
4051 #include <asm/page.h>
4052@@ -43,6 +47,7 @@
4053 #include <asm/tlbflush.h>
4054 #include <asm/siginfo.h>
4055 #include <mm/mmu_decl.h>
4056+#include <asm/ptrace.h>
4057
4058 #include "icswx.h"
4059
4060@@ -68,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4061 }
4062 #endif
4063
4064+#ifdef CONFIG_PAX_PAGEEXEC
4065+/*
4066+ * PaX: decide what to do with offenders (regs->nip = fault address)
4067+ *
4068+ * returns 1 when task should be killed
4069+ */
4070+static int pax_handle_fetch_fault(struct pt_regs *regs)
4071+{
4072+ return 1;
4073+}
4074+
4075+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4076+{
4077+ unsigned long i;
4078+
4079+ printk(KERN_ERR "PAX: bytes at PC: ");
4080+ for (i = 0; i < 5; i++) {
4081+ unsigned int c;
4082+ if (get_user(c, (unsigned int __user *)pc+i))
4083+ printk(KERN_CONT "???????? ");
4084+ else
4085+ printk(KERN_CONT "%08x ", c);
4086+ }
4087+ printk("\n");
4088+}
4089+#endif
4090+
4091 /*
4092 * Check whether the instruction at regs->nip is a store using
4093 * an update addressing form which will update r1.
4094@@ -138,7 +170,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4095 * indicate errors in DSISR but can validly be set in SRR1.
4096 */
4097 if (trap == 0x400)
4098- error_code &= 0x48200000;
4099+ error_code &= 0x58200000;
4100 else
4101 is_write = error_code & DSISR_ISSTORE;
4102 #else
4103@@ -276,7 +308,7 @@ good_area:
4104 * "undefined". Of those that can be set, this is the only
4105 * one which seems bad.
4106 */
4107- if (error_code & 0x10000000)
4108+ if (error_code & DSISR_GUARDED)
4109 /* Guarded storage error. */
4110 goto bad_area;
4111 #endif /* CONFIG_8xx */
4112@@ -291,7 +323,7 @@ good_area:
4113 * processors use the same I/D cache coherency mechanism
4114 * as embedded.
4115 */
4116- if (error_code & DSISR_PROTFAULT)
4117+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4118 goto bad_area;
4119 #endif /* CONFIG_PPC_STD_MMU */
4120
4121@@ -360,6 +392,23 @@ bad_area:
4122 bad_area_nosemaphore:
4123 /* User mode accesses cause a SIGSEGV */
4124 if (user_mode(regs)) {
4125+
4126+#ifdef CONFIG_PAX_PAGEEXEC
4127+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4128+#ifdef CONFIG_PPC_STD_MMU
4129+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4130+#else
4131+ if (is_exec && regs->nip == address) {
4132+#endif
4133+ switch (pax_handle_fetch_fault(regs)) {
4134+ }
4135+
4136+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4137+ do_group_exit(SIGKILL);
4138+ }
4139+ }
4140+#endif
4141+
4142 _exception(SIGSEGV, regs, code, address);
4143 return 0;
4144 }
4145diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4146index 67a42ed..1c7210c 100644
4147--- a/arch/powerpc/mm/mmap_64.c
4148+++ b/arch/powerpc/mm/mmap_64.c
4149@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4150 */
4151 if (mmap_is_legacy()) {
4152 mm->mmap_base = TASK_UNMAPPED_BASE;
4153+
4154+#ifdef CONFIG_PAX_RANDMMAP
4155+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4156+ mm->mmap_base += mm->delta_mmap;
4157+#endif
4158+
4159 mm->get_unmapped_area = arch_get_unmapped_area;
4160 mm->unmap_area = arch_unmap_area;
4161 } else {
4162 mm->mmap_base = mmap_base();
4163+
4164+#ifdef CONFIG_PAX_RANDMMAP
4165+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4166+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4167+#endif
4168+
4169 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4170 mm->unmap_area = arch_unmap_area_topdown;
4171 }
4172diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4173index 73709f7..6b90313 100644
4174--- a/arch/powerpc/mm/slice.c
4175+++ b/arch/powerpc/mm/slice.c
4176@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4177 if ((mm->task_size - len) < addr)
4178 return 0;
4179 vma = find_vma(mm, addr);
4180- return (!vma || (addr + len) <= vma->vm_start);
4181+ return check_heap_stack_gap(vma, addr, len);
4182 }
4183
4184 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4185@@ -256,7 +256,7 @@ full_search:
4186 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4187 continue;
4188 }
4189- if (!vma || addr + len <= vma->vm_start) {
4190+ if (check_heap_stack_gap(vma, addr, len)) {
4191 /*
4192 * Remember the place where we stopped the search:
4193 */
4194@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4195 }
4196 }
4197
4198- addr = mm->mmap_base;
4199- while (addr > len) {
4200+ if (mm->mmap_base < len)
4201+ addr = -ENOMEM;
4202+ else
4203+ addr = mm->mmap_base - len;
4204+
4205+ while (!IS_ERR_VALUE(addr)) {
4206 /* Go down by chunk size */
4207- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4208+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
4209
4210 /* Check for hit with different page size */
4211 mask = slice_range_to_mask(addr, len);
4212@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4213 * return with success:
4214 */
4215 vma = find_vma(mm, addr);
4216- if (!vma || (addr + len) <= vma->vm_start) {
4217+ if (check_heap_stack_gap(vma, addr, len)) {
4218 /* remember the address as a hint for next time */
4219 if (use_cache)
4220 mm->free_area_cache = addr;
4221@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4222 mm->cached_hole_size = vma->vm_start - addr;
4223
4224 /* try just below the current vma->vm_start */
4225- addr = vma->vm_start;
4226+ addr = skip_heap_stack_gap(vma, len);
4227 }
4228
4229 /*
4230@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4231 if (fixed && addr > (mm->task_size - len))
4232 return -EINVAL;
4233
4234+#ifdef CONFIG_PAX_RANDMMAP
4235+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4236+ addr = 0;
4237+#endif
4238+
4239 /* If hint, make sure it matches our alignment restrictions */
4240 if (!fixed && addr) {
4241 addr = _ALIGN_UP(addr, 1ul << pshift);
4242diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4243index 8517d2a..d2738d4 100644
4244--- a/arch/s390/include/asm/atomic.h
4245+++ b/arch/s390/include/asm/atomic.h
4246@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4247 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4248 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4249
4250+#define atomic64_read_unchecked(v) atomic64_read(v)
4251+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4252+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4253+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4254+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4255+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4256+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4257+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4258+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4259+
4260 #define smp_mb__before_atomic_dec() smp_mb()
4261 #define smp_mb__after_atomic_dec() smp_mb()
4262 #define smp_mb__before_atomic_inc() smp_mb()
4263diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4264index 2a30d5a..5e5586f 100644
4265--- a/arch/s390/include/asm/cache.h
4266+++ b/arch/s390/include/asm/cache.h
4267@@ -11,8 +11,10 @@
4268 #ifndef __ARCH_S390_CACHE_H
4269 #define __ARCH_S390_CACHE_H
4270
4271-#define L1_CACHE_BYTES 256
4272+#include <linux/const.h>
4273+
4274 #define L1_CACHE_SHIFT 8
4275+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4276 #define NET_SKB_PAD 32
4277
4278 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4279diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4280index 547f1a6..0b22b53 100644
4281--- a/arch/s390/include/asm/elf.h
4282+++ b/arch/s390/include/asm/elf.h
4283@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
4284 the loader. We need to make sure that it is out of the way of the program
4285 that it will "exec", and that there is sufficient room for the brk. */
4286
4287-extern unsigned long randomize_et_dyn(unsigned long base);
4288-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4289+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4290+
4291+#ifdef CONFIG_PAX_ASLR
4292+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4293+
4294+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4295+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4296+#endif
4297
4298 /* This yields a mask that user programs can use to figure out what
4299 instruction set this CPU supports. */
4300@@ -211,7 +217,4 @@ struct linux_binprm;
4301 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4302 int arch_setup_additional_pages(struct linux_binprm *, int);
4303
4304-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4305-#define arch_randomize_brk arch_randomize_brk
4306-
4307 #endif
4308diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
4309index d73cc6b..1a296ad 100644
4310--- a/arch/s390/include/asm/system.h
4311+++ b/arch/s390/include/asm/system.h
4312@@ -260,7 +260,7 @@ extern void (*_machine_restart)(char *command);
4313 extern void (*_machine_halt)(void);
4314 extern void (*_machine_power_off)(void);
4315
4316-extern unsigned long arch_align_stack(unsigned long sp);
4317+#define arch_align_stack(x) ((x) & ~0xfUL)
4318
4319 static inline int tprot(unsigned long addr)
4320 {
4321diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4322index 2b23885..e136e31 100644
4323--- a/arch/s390/include/asm/uaccess.h
4324+++ b/arch/s390/include/asm/uaccess.h
4325@@ -235,6 +235,10 @@ static inline unsigned long __must_check
4326 copy_to_user(void __user *to, const void *from, unsigned long n)
4327 {
4328 might_fault();
4329+
4330+ if ((long)n < 0)
4331+ return n;
4332+
4333 if (access_ok(VERIFY_WRITE, to, n))
4334 n = __copy_to_user(to, from, n);
4335 return n;
4336@@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4337 static inline unsigned long __must_check
4338 __copy_from_user(void *to, const void __user *from, unsigned long n)
4339 {
4340+ if ((long)n < 0)
4341+ return n;
4342+
4343 if (__builtin_constant_p(n) && (n <= 256))
4344 return uaccess.copy_from_user_small(n, from, to);
4345 else
4346@@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4347 unsigned int sz = __compiletime_object_size(to);
4348
4349 might_fault();
4350+
4351+ if ((long)n < 0)
4352+ return n;
4353+
4354 if (unlikely(sz != -1 && sz < n)) {
4355 copy_from_user_overflow();
4356 return n;
4357diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4358index dfcb343..eda788a 100644
4359--- a/arch/s390/kernel/module.c
4360+++ b/arch/s390/kernel/module.c
4361@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4362
4363 /* Increase core size by size of got & plt and set start
4364 offsets for got and plt. */
4365- me->core_size = ALIGN(me->core_size, 4);
4366- me->arch.got_offset = me->core_size;
4367- me->core_size += me->arch.got_size;
4368- me->arch.plt_offset = me->core_size;
4369- me->core_size += me->arch.plt_size;
4370+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4371+ me->arch.got_offset = me->core_size_rw;
4372+ me->core_size_rw += me->arch.got_size;
4373+ me->arch.plt_offset = me->core_size_rx;
4374+ me->core_size_rx += me->arch.plt_size;
4375 return 0;
4376 }
4377
4378@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4379 if (info->got_initialized == 0) {
4380 Elf_Addr *gotent;
4381
4382- gotent = me->module_core + me->arch.got_offset +
4383+ gotent = me->module_core_rw + me->arch.got_offset +
4384 info->got_offset;
4385 *gotent = val;
4386 info->got_initialized = 1;
4387@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4388 else if (r_type == R_390_GOTENT ||
4389 r_type == R_390_GOTPLTENT)
4390 *(unsigned int *) loc =
4391- (val + (Elf_Addr) me->module_core - loc) >> 1;
4392+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4393 else if (r_type == R_390_GOT64 ||
4394 r_type == R_390_GOTPLT64)
4395 *(unsigned long *) loc = val;
4396@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4397 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4398 if (info->plt_initialized == 0) {
4399 unsigned int *ip;
4400- ip = me->module_core + me->arch.plt_offset +
4401+ ip = me->module_core_rx + me->arch.plt_offset +
4402 info->plt_offset;
4403 #ifndef CONFIG_64BIT
4404 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4405@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4406 val - loc + 0xffffUL < 0x1ffffeUL) ||
4407 (r_type == R_390_PLT32DBL &&
4408 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4409- val = (Elf_Addr) me->module_core +
4410+ val = (Elf_Addr) me->module_core_rx +
4411 me->arch.plt_offset +
4412 info->plt_offset;
4413 val += rela->r_addend - loc;
4414@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4415 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4416 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4417 val = val + rela->r_addend -
4418- ((Elf_Addr) me->module_core + me->arch.got_offset);
4419+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4420 if (r_type == R_390_GOTOFF16)
4421 *(unsigned short *) loc = val;
4422 else if (r_type == R_390_GOTOFF32)
4423@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4424 break;
4425 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4426 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4427- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4428+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4429 rela->r_addend - loc;
4430 if (r_type == R_390_GOTPC)
4431 *(unsigned int *) loc = val;
4432diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4433index e795933..b32563c 100644
4434--- a/arch/s390/kernel/process.c
4435+++ b/arch/s390/kernel/process.c
4436@@ -323,39 +323,3 @@ unsigned long get_wchan(struct task_struct *p)
4437 }
4438 return 0;
4439 }
4440-
4441-unsigned long arch_align_stack(unsigned long sp)
4442-{
4443- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4444- sp -= get_random_int() & ~PAGE_MASK;
4445- return sp & ~0xf;
4446-}
4447-
4448-static inline unsigned long brk_rnd(void)
4449-{
4450- /* 8MB for 32bit, 1GB for 64bit */
4451- if (is_32bit_task())
4452- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4453- else
4454- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4455-}
4456-
4457-unsigned long arch_randomize_brk(struct mm_struct *mm)
4458-{
4459- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4460-
4461- if (ret < mm->brk)
4462- return mm->brk;
4463- return ret;
4464-}
4465-
4466-unsigned long randomize_et_dyn(unsigned long base)
4467-{
4468- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4469-
4470- if (!(current->flags & PF_RANDOMIZE))
4471- return base;
4472- if (ret < base)
4473- return base;
4474- return ret;
4475-}
4476diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4477index a0155c0..34cc491 100644
4478--- a/arch/s390/mm/mmap.c
4479+++ b/arch/s390/mm/mmap.c
4480@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4481 */
4482 if (mmap_is_legacy()) {
4483 mm->mmap_base = TASK_UNMAPPED_BASE;
4484+
4485+#ifdef CONFIG_PAX_RANDMMAP
4486+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4487+ mm->mmap_base += mm->delta_mmap;
4488+#endif
4489+
4490 mm->get_unmapped_area = arch_get_unmapped_area;
4491 mm->unmap_area = arch_unmap_area;
4492 } else {
4493 mm->mmap_base = mmap_base();
4494+
4495+#ifdef CONFIG_PAX_RANDMMAP
4496+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4497+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4498+#endif
4499+
4500 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4501 mm->unmap_area = arch_unmap_area_topdown;
4502 }
4503@@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4504 */
4505 if (mmap_is_legacy()) {
4506 mm->mmap_base = TASK_UNMAPPED_BASE;
4507+
4508+#ifdef CONFIG_PAX_RANDMMAP
4509+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4510+ mm->mmap_base += mm->delta_mmap;
4511+#endif
4512+
4513 mm->get_unmapped_area = s390_get_unmapped_area;
4514 mm->unmap_area = arch_unmap_area;
4515 } else {
4516 mm->mmap_base = mmap_base();
4517+
4518+#ifdef CONFIG_PAX_RANDMMAP
4519+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4520+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4521+#endif
4522+
4523 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4524 mm->unmap_area = arch_unmap_area_topdown;
4525 }
4526diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4527index ae3d59f..f65f075 100644
4528--- a/arch/score/include/asm/cache.h
4529+++ b/arch/score/include/asm/cache.h
4530@@ -1,7 +1,9 @@
4531 #ifndef _ASM_SCORE_CACHE_H
4532 #define _ASM_SCORE_CACHE_H
4533
4534+#include <linux/const.h>
4535+
4536 #define L1_CACHE_SHIFT 4
4537-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4538+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4539
4540 #endif /* _ASM_SCORE_CACHE_H */
4541diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4542index 589d5c7..669e274 100644
4543--- a/arch/score/include/asm/system.h
4544+++ b/arch/score/include/asm/system.h
4545@@ -17,7 +17,7 @@ do { \
4546 #define finish_arch_switch(prev) do {} while (0)
4547
4548 typedef void (*vi_handler_t)(void);
4549-extern unsigned long arch_align_stack(unsigned long sp);
4550+#define arch_align_stack(x) (x)
4551
4552 #define mb() barrier()
4553 #define rmb() barrier()
4554diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4555index 25d0803..d6c8e36 100644
4556--- a/arch/score/kernel/process.c
4557+++ b/arch/score/kernel/process.c
4558@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4559
4560 return task_pt_regs(task)->cp0_epc;
4561 }
4562-
4563-unsigned long arch_align_stack(unsigned long sp)
4564-{
4565- return sp;
4566-}
4567diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4568index ef9e555..331bd29 100644
4569--- a/arch/sh/include/asm/cache.h
4570+++ b/arch/sh/include/asm/cache.h
4571@@ -9,10 +9,11 @@
4572 #define __ASM_SH_CACHE_H
4573 #ifdef __KERNEL__
4574
4575+#include <linux/const.h>
4576 #include <linux/init.h>
4577 #include <cpu/cache.h>
4578
4579-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4580+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4581
4582 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4583
4584diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4585index afeb710..d1d1289 100644
4586--- a/arch/sh/mm/mmap.c
4587+++ b/arch/sh/mm/mmap.c
4588@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4589 addr = PAGE_ALIGN(addr);
4590
4591 vma = find_vma(mm, addr);
4592- if (TASK_SIZE - len >= addr &&
4593- (!vma || addr + len <= vma->vm_start))
4594+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4595 return addr;
4596 }
4597
4598@@ -106,7 +105,7 @@ full_search:
4599 }
4600 return -ENOMEM;
4601 }
4602- if (likely(!vma || addr + len <= vma->vm_start)) {
4603+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4604 /*
4605 * Remember the place where we stopped the search:
4606 */
4607@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4608 addr = PAGE_ALIGN(addr);
4609
4610 vma = find_vma(mm, addr);
4611- if (TASK_SIZE - len >= addr &&
4612- (!vma || addr + len <= vma->vm_start))
4613+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4614 return addr;
4615 }
4616
4617@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4618 /* make sure it can fit in the remaining address space */
4619 if (likely(addr > len)) {
4620 vma = find_vma(mm, addr-len);
4621- if (!vma || addr <= vma->vm_start) {
4622+ if (check_heap_stack_gap(vma, addr - len, len)) {
4623 /* remember the address as a hint for next time */
4624 return (mm->free_area_cache = addr-len);
4625 }
4626@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4627 if (unlikely(mm->mmap_base < len))
4628 goto bottomup;
4629
4630- addr = mm->mmap_base-len;
4631- if (do_colour_align)
4632- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4633+ addr = mm->mmap_base - len;
4634
4635 do {
4636+ if (do_colour_align)
4637+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4638 /*
4639 * Lookup failure means no vma is above this address,
4640 * else if new region fits below vma->vm_start,
4641 * return with success:
4642 */
4643 vma = find_vma(mm, addr);
4644- if (likely(!vma || addr+len <= vma->vm_start)) {
4645+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4646 /* remember the address as a hint for next time */
4647 return (mm->free_area_cache = addr);
4648 }
4649@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4650 mm->cached_hole_size = vma->vm_start - addr;
4651
4652 /* try just below the current vma->vm_start */
4653- addr = vma->vm_start-len;
4654- if (do_colour_align)
4655- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4656- } while (likely(len < vma->vm_start));
4657+ addr = skip_heap_stack_gap(vma, len);
4658+ } while (!IS_ERR_VALUE(addr));
4659
4660 bottomup:
4661 /*
4662diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4663index eddcfb3..b117d90 100644
4664--- a/arch/sparc/Makefile
4665+++ b/arch/sparc/Makefile
4666@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4667 # Export what is needed by arch/sparc/boot/Makefile
4668 export VMLINUX_INIT VMLINUX_MAIN
4669 VMLINUX_INIT := $(head-y) $(init-y)
4670-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4671+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4672 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4673 VMLINUX_MAIN += $(drivers-y) $(net-y)
4674
4675diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4676index 9f421df..b81fc12 100644
4677--- a/arch/sparc/include/asm/atomic_64.h
4678+++ b/arch/sparc/include/asm/atomic_64.h
4679@@ -14,18 +14,40 @@
4680 #define ATOMIC64_INIT(i) { (i) }
4681
4682 #define atomic_read(v) (*(volatile int *)&(v)->counter)
4683+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4684+{
4685+ return v->counter;
4686+}
4687 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
4688+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4689+{
4690+ return v->counter;
4691+}
4692
4693 #define atomic_set(v, i) (((v)->counter) = i)
4694+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4695+{
4696+ v->counter = i;
4697+}
4698 #define atomic64_set(v, i) (((v)->counter) = i)
4699+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4700+{
4701+ v->counter = i;
4702+}
4703
4704 extern void atomic_add(int, atomic_t *);
4705+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4706 extern void atomic64_add(long, atomic64_t *);
4707+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4708 extern void atomic_sub(int, atomic_t *);
4709+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4710 extern void atomic64_sub(long, atomic64_t *);
4711+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4712
4713 extern int atomic_add_ret(int, atomic_t *);
4714+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4715 extern long atomic64_add_ret(long, atomic64_t *);
4716+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4717 extern int atomic_sub_ret(int, atomic_t *);
4718 extern long atomic64_sub_ret(long, atomic64_t *);
4719
4720@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4721 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4722
4723 #define atomic_inc_return(v) atomic_add_ret(1, v)
4724+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4725+{
4726+ return atomic_add_ret_unchecked(1, v);
4727+}
4728 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4729+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4730+{
4731+ return atomic64_add_ret_unchecked(1, v);
4732+}
4733
4734 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4735 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4736
4737 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4738+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4739+{
4740+ return atomic_add_ret_unchecked(i, v);
4741+}
4742 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4743+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4744+{
4745+ return atomic64_add_ret_unchecked(i, v);
4746+}
4747
4748 /*
4749 * atomic_inc_and_test - increment and test
4750@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4751 * other cases.
4752 */
4753 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4754+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4755+{
4756+ return atomic_inc_return_unchecked(v) == 0;
4757+}
4758 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4759
4760 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4761@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4762 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4763
4764 #define atomic_inc(v) atomic_add(1, v)
4765+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4766+{
4767+ atomic_add_unchecked(1, v);
4768+}
4769 #define atomic64_inc(v) atomic64_add(1, v)
4770+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4771+{
4772+ atomic64_add_unchecked(1, v);
4773+}
4774
4775 #define atomic_dec(v) atomic_sub(1, v)
4776+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4777+{
4778+ atomic_sub_unchecked(1, v);
4779+}
4780 #define atomic64_dec(v) atomic64_sub(1, v)
4781+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4782+{
4783+ atomic64_sub_unchecked(1, v);
4784+}
4785
4786 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4787 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4788
4789 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4790+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4791+{
4792+ return cmpxchg(&v->counter, old, new);
4793+}
4794 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4795+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4796+{
4797+ return xchg(&v->counter, new);
4798+}
4799
4800 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4801 {
4802- int c, old;
4803+ int c, old, new;
4804 c = atomic_read(v);
4805 for (;;) {
4806- if (unlikely(c == (u)))
4807+ if (unlikely(c == u))
4808 break;
4809- old = atomic_cmpxchg((v), c, c + (a));
4810+
4811+ asm volatile("addcc %2, %0, %0\n"
4812+
4813+#ifdef CONFIG_PAX_REFCOUNT
4814+ "tvs %%icc, 6\n"
4815+#endif
4816+
4817+ : "=r" (new)
4818+ : "0" (c), "ir" (a)
4819+ : "cc");
4820+
4821+ old = atomic_cmpxchg(v, c, new);
4822 if (likely(old == c))
4823 break;
4824 c = old;
4825@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4826 #define atomic64_cmpxchg(v, o, n) \
4827 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4828 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4829+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4830+{
4831+ return xchg(&v->counter, new);
4832+}
4833
4834 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4835 {
4836- long c, old;
4837+ long c, old, new;
4838 c = atomic64_read(v);
4839 for (;;) {
4840- if (unlikely(c == (u)))
4841+ if (unlikely(c == u))
4842 break;
4843- old = atomic64_cmpxchg((v), c, c + (a));
4844+
4845+ asm volatile("addcc %2, %0, %0\n"
4846+
4847+#ifdef CONFIG_PAX_REFCOUNT
4848+ "tvs %%xcc, 6\n"
4849+#endif
4850+
4851+ : "=r" (new)
4852+ : "0" (c), "ir" (a)
4853+ : "cc");
4854+
4855+ old = atomic64_cmpxchg(v, c, new);
4856 if (likely(old == c))
4857 break;
4858 c = old;
4859 }
4860- return c != (u);
4861+ return c != u;
4862 }
4863
4864 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4865diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4866index 69358b5..9d0d492 100644
4867--- a/arch/sparc/include/asm/cache.h
4868+++ b/arch/sparc/include/asm/cache.h
4869@@ -7,10 +7,12 @@
4870 #ifndef _SPARC_CACHE_H
4871 #define _SPARC_CACHE_H
4872
4873+#include <linux/const.h>
4874+
4875 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
4876
4877 #define L1_CACHE_SHIFT 5
4878-#define L1_CACHE_BYTES 32
4879+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4880
4881 #ifdef CONFIG_SPARC32
4882 #define SMP_CACHE_BYTES_SHIFT 5
4883diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4884index 4269ca6..e3da77f 100644
4885--- a/arch/sparc/include/asm/elf_32.h
4886+++ b/arch/sparc/include/asm/elf_32.h
4887@@ -114,6 +114,13 @@ typedef struct {
4888
4889 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4890
4891+#ifdef CONFIG_PAX_ASLR
4892+#define PAX_ELF_ET_DYN_BASE 0x10000UL
4893+
4894+#define PAX_DELTA_MMAP_LEN 16
4895+#define PAX_DELTA_STACK_LEN 16
4896+#endif
4897+
4898 /* This yields a mask that user programs can use to figure out what
4899 instruction set this cpu supports. This can NOT be done in userspace
4900 on Sparc. */
4901diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4902index 7df8b7f..4946269 100644
4903--- a/arch/sparc/include/asm/elf_64.h
4904+++ b/arch/sparc/include/asm/elf_64.h
4905@@ -180,6 +180,13 @@ typedef struct {
4906 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4907 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4908
4909+#ifdef CONFIG_PAX_ASLR
4910+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4911+
4912+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4913+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4914+#endif
4915+
4916 extern unsigned long sparc64_elf_hwcap;
4917 #define ELF_HWCAP sparc64_elf_hwcap
4918
4919diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4920index a790cc6..091ed94 100644
4921--- a/arch/sparc/include/asm/pgtable_32.h
4922+++ b/arch/sparc/include/asm/pgtable_32.h
4923@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4924 BTFIXUPDEF_INT(page_none)
4925 BTFIXUPDEF_INT(page_copy)
4926 BTFIXUPDEF_INT(page_readonly)
4927+
4928+#ifdef CONFIG_PAX_PAGEEXEC
4929+BTFIXUPDEF_INT(page_shared_noexec)
4930+BTFIXUPDEF_INT(page_copy_noexec)
4931+BTFIXUPDEF_INT(page_readonly_noexec)
4932+#endif
4933+
4934 BTFIXUPDEF_INT(page_kernel)
4935
4936 #define PMD_SHIFT SUN4C_PMD_SHIFT
4937@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
4938 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4939 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4940
4941+#ifdef CONFIG_PAX_PAGEEXEC
4942+extern pgprot_t PAGE_SHARED_NOEXEC;
4943+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4944+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4945+#else
4946+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4947+# define PAGE_COPY_NOEXEC PAGE_COPY
4948+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4949+#endif
4950+
4951 extern unsigned long page_kernel;
4952
4953 #ifdef MODULE
4954diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4955index f6ae2b2..b03ffc7 100644
4956--- a/arch/sparc/include/asm/pgtsrmmu.h
4957+++ b/arch/sparc/include/asm/pgtsrmmu.h
4958@@ -115,6 +115,13 @@
4959 SRMMU_EXEC | SRMMU_REF)
4960 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4961 SRMMU_EXEC | SRMMU_REF)
4962+
4963+#ifdef CONFIG_PAX_PAGEEXEC
4964+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4965+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4966+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4967+#endif
4968+
4969 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4970 SRMMU_DIRTY | SRMMU_REF)
4971
4972diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4973index 9689176..63c18ea 100644
4974--- a/arch/sparc/include/asm/spinlock_64.h
4975+++ b/arch/sparc/include/asm/spinlock_64.h
4976@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
4977
4978 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4979
4980-static void inline arch_read_lock(arch_rwlock_t *lock)
4981+static inline void arch_read_lock(arch_rwlock_t *lock)
4982 {
4983 unsigned long tmp1, tmp2;
4984
4985 __asm__ __volatile__ (
4986 "1: ldsw [%2], %0\n"
4987 " brlz,pn %0, 2f\n"
4988-"4: add %0, 1, %1\n"
4989+"4: addcc %0, 1, %1\n"
4990+
4991+#ifdef CONFIG_PAX_REFCOUNT
4992+" tvs %%icc, 6\n"
4993+#endif
4994+
4995 " cas [%2], %0, %1\n"
4996 " cmp %0, %1\n"
4997 " bne,pn %%icc, 1b\n"
4998@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
4999 " .previous"
5000 : "=&r" (tmp1), "=&r" (tmp2)
5001 : "r" (lock)
5002- : "memory");
5003+ : "memory", "cc");
5004 }
5005
5006-static int inline arch_read_trylock(arch_rwlock_t *lock)
5007+static inline int arch_read_trylock(arch_rwlock_t *lock)
5008 {
5009 int tmp1, tmp2;
5010
5011@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5012 "1: ldsw [%2], %0\n"
5013 " brlz,a,pn %0, 2f\n"
5014 " mov 0, %0\n"
5015-" add %0, 1, %1\n"
5016+" addcc %0, 1, %1\n"
5017+
5018+#ifdef CONFIG_PAX_REFCOUNT
5019+" tvs %%icc, 6\n"
5020+#endif
5021+
5022 " cas [%2], %0, %1\n"
5023 " cmp %0, %1\n"
5024 " bne,pn %%icc, 1b\n"
5025@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5026 return tmp1;
5027 }
5028
5029-static void inline arch_read_unlock(arch_rwlock_t *lock)
5030+static inline void arch_read_unlock(arch_rwlock_t *lock)
5031 {
5032 unsigned long tmp1, tmp2;
5033
5034 __asm__ __volatile__(
5035 "1: lduw [%2], %0\n"
5036-" sub %0, 1, %1\n"
5037+" subcc %0, 1, %1\n"
5038+
5039+#ifdef CONFIG_PAX_REFCOUNT
5040+" tvs %%icc, 6\n"
5041+#endif
5042+
5043 " cas [%2], %0, %1\n"
5044 " cmp %0, %1\n"
5045 " bne,pn %%xcc, 1b\n"
5046@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5047 : "memory");
5048 }
5049
5050-static void inline arch_write_lock(arch_rwlock_t *lock)
5051+static inline void arch_write_lock(arch_rwlock_t *lock)
5052 {
5053 unsigned long mask, tmp1, tmp2;
5054
5055@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5056 : "memory");
5057 }
5058
5059-static void inline arch_write_unlock(arch_rwlock_t *lock)
5060+static inline void arch_write_unlock(arch_rwlock_t *lock)
5061 {
5062 __asm__ __volatile__(
5063 " stw %%g0, [%0]"
5064@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5065 : "memory");
5066 }
5067
5068-static int inline arch_write_trylock(arch_rwlock_t *lock)
5069+static inline int arch_write_trylock(arch_rwlock_t *lock)
5070 {
5071 unsigned long mask, tmp1, tmp2, result;
5072
5073diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5074index c2a1080..21ed218 100644
5075--- a/arch/sparc/include/asm/thread_info_32.h
5076+++ b/arch/sparc/include/asm/thread_info_32.h
5077@@ -50,6 +50,8 @@ struct thread_info {
5078 unsigned long w_saved;
5079
5080 struct restart_block restart_block;
5081+
5082+ unsigned long lowest_stack;
5083 };
5084
5085 /*
5086diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5087index 01d057f..0a02f7e 100644
5088--- a/arch/sparc/include/asm/thread_info_64.h
5089+++ b/arch/sparc/include/asm/thread_info_64.h
5090@@ -63,6 +63,8 @@ struct thread_info {
5091 struct pt_regs *kern_una_regs;
5092 unsigned int kern_una_insn;
5093
5094+ unsigned long lowest_stack;
5095+
5096 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5097 };
5098
5099diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5100index e88fbe5..96b0ce5 100644
5101--- a/arch/sparc/include/asm/uaccess.h
5102+++ b/arch/sparc/include/asm/uaccess.h
5103@@ -1,5 +1,13 @@
5104 #ifndef ___ASM_SPARC_UACCESS_H
5105 #define ___ASM_SPARC_UACCESS_H
5106+
5107+#ifdef __KERNEL__
5108+#ifndef __ASSEMBLY__
5109+#include <linux/types.h>
5110+extern void check_object_size(const void *ptr, unsigned long n, bool to);
5111+#endif
5112+#endif
5113+
5114 #if defined(__sparc__) && defined(__arch64__)
5115 #include <asm/uaccess_64.h>
5116 #else
5117diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5118index 8303ac4..07f333d 100644
5119--- a/arch/sparc/include/asm/uaccess_32.h
5120+++ b/arch/sparc/include/asm/uaccess_32.h
5121@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5122
5123 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5124 {
5125- if (n && __access_ok((unsigned long) to, n))
5126+ if ((long)n < 0)
5127+ return n;
5128+
5129+ if (n && __access_ok((unsigned long) to, n)) {
5130+ if (!__builtin_constant_p(n))
5131+ check_object_size(from, n, true);
5132 return __copy_user(to, (__force void __user *) from, n);
5133- else
5134+ } else
5135 return n;
5136 }
5137
5138 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5139 {
5140+ if ((long)n < 0)
5141+ return n;
5142+
5143+ if (!__builtin_constant_p(n))
5144+ check_object_size(from, n, true);
5145+
5146 return __copy_user(to, (__force void __user *) from, n);
5147 }
5148
5149 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5150 {
5151- if (n && __access_ok((unsigned long) from, n))
5152+ if ((long)n < 0)
5153+ return n;
5154+
5155+ if (n && __access_ok((unsigned long) from, n)) {
5156+ if (!__builtin_constant_p(n))
5157+ check_object_size(to, n, false);
5158 return __copy_user((__force void __user *) to, from, n);
5159- else
5160+ } else
5161 return n;
5162 }
5163
5164 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5165 {
5166+ if ((long)n < 0)
5167+ return n;
5168+
5169 return __copy_user((__force void __user *) to, from, n);
5170 }
5171
5172diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5173index 3e1449f..5293a0e 100644
5174--- a/arch/sparc/include/asm/uaccess_64.h
5175+++ b/arch/sparc/include/asm/uaccess_64.h
5176@@ -10,6 +10,7 @@
5177 #include <linux/compiler.h>
5178 #include <linux/string.h>
5179 #include <linux/thread_info.h>
5180+#include <linux/kernel.h>
5181 #include <asm/asi.h>
5182 #include <asm/system.h>
5183 #include <asm/spitfire.h>
5184@@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5185 static inline unsigned long __must_check
5186 copy_from_user(void *to, const void __user *from, unsigned long size)
5187 {
5188- unsigned long ret = ___copy_from_user(to, from, size);
5189+ unsigned long ret;
5190
5191+ if ((long)size < 0 || size > INT_MAX)
5192+ return size;
5193+
5194+ if (!__builtin_constant_p(size))
5195+ check_object_size(to, size, false);
5196+
5197+ ret = ___copy_from_user(to, from, size);
5198 if (unlikely(ret))
5199 ret = copy_from_user_fixup(to, from, size);
5200
5201@@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5202 static inline unsigned long __must_check
5203 copy_to_user(void __user *to, const void *from, unsigned long size)
5204 {
5205- unsigned long ret = ___copy_to_user(to, from, size);
5206+ unsigned long ret;
5207
5208+ if ((long)size < 0 || size > INT_MAX)
5209+ return size;
5210+
5211+ if (!__builtin_constant_p(size))
5212+ check_object_size(from, size, true);
5213+
5214+ ret = ___copy_to_user(to, from, size);
5215 if (unlikely(ret))
5216 ret = copy_to_user_fixup(to, from, size);
5217 return ret;
5218diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5219index cb85458..e063f17 100644
5220--- a/arch/sparc/kernel/Makefile
5221+++ b/arch/sparc/kernel/Makefile
5222@@ -3,7 +3,7 @@
5223 #
5224
5225 asflags-y := -ansi
5226-ccflags-y := -Werror
5227+#ccflags-y := -Werror
5228
5229 extra-y := head_$(BITS).o
5230 extra-y += init_task.o
5231diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5232index f793742..4d880af 100644
5233--- a/arch/sparc/kernel/process_32.c
5234+++ b/arch/sparc/kernel/process_32.c
5235@@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
5236 rw->ins[4], rw->ins[5],
5237 rw->ins[6],
5238 rw->ins[7]);
5239- printk("%pS\n", (void *) rw->ins[7]);
5240+ printk("%pA\n", (void *) rw->ins[7]);
5241 rw = (struct reg_window32 *) rw->ins[6];
5242 }
5243 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5244@@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
5245
5246 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5247 r->psr, r->pc, r->npc, r->y, print_tainted());
5248- printk("PC: <%pS>\n", (void *) r->pc);
5249+ printk("PC: <%pA>\n", (void *) r->pc);
5250 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5251 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5252 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5253 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5254 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5255 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5256- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5257+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5258
5259 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5260 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5261@@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5262 rw = (struct reg_window32 *) fp;
5263 pc = rw->ins[7];
5264 printk("[%08lx : ", pc);
5265- printk("%pS ] ", (void *) pc);
5266+ printk("%pA ] ", (void *) pc);
5267 fp = rw->ins[6];
5268 } while (++count < 16);
5269 printk("\n");
5270diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5271index 39d8b05..d1a7d90 100644
5272--- a/arch/sparc/kernel/process_64.c
5273+++ b/arch/sparc/kernel/process_64.c
5274@@ -182,14 +182,14 @@ static void show_regwindow(struct pt_regs *regs)
5275 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5276 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5277 if (regs->tstate & TSTATE_PRIV)
5278- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5279+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5280 }
5281
5282 void show_regs(struct pt_regs *regs)
5283 {
5284 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5285 regs->tpc, regs->tnpc, regs->y, print_tainted());
5286- printk("TPC: <%pS>\n", (void *) regs->tpc);
5287+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5288 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5289 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5290 regs->u_regs[3]);
5291@@ -202,7 +202,7 @@ void show_regs(struct pt_regs *regs)
5292 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5293 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5294 regs->u_regs[15]);
5295- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5296+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5297 show_regwindow(regs);
5298 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5299 }
5300@@ -287,7 +287,7 @@ void arch_trigger_all_cpu_backtrace(void)
5301 ((tp && tp->task) ? tp->task->pid : -1));
5302
5303 if (gp->tstate & TSTATE_PRIV) {
5304- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5305+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5306 (void *) gp->tpc,
5307 (void *) gp->o7,
5308 (void *) gp->i7,
5309diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5310index 42b282f..28ce9f2 100644
5311--- a/arch/sparc/kernel/sys_sparc_32.c
5312+++ b/arch/sparc/kernel/sys_sparc_32.c
5313@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5314 if (ARCH_SUN4C && len > 0x20000000)
5315 return -ENOMEM;
5316 if (!addr)
5317- addr = TASK_UNMAPPED_BASE;
5318+ addr = current->mm->mmap_base;
5319
5320 if (flags & MAP_SHARED)
5321 addr = COLOUR_ALIGN(addr);
5322@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5323 }
5324 if (TASK_SIZE - PAGE_SIZE - len < addr)
5325 return -ENOMEM;
5326- if (!vmm || addr + len <= vmm->vm_start)
5327+ if (check_heap_stack_gap(vmm, addr, len))
5328 return addr;
5329 addr = vmm->vm_end;
5330 if (flags & MAP_SHARED)
5331diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5332index 232df99..cee1f9c 100644
5333--- a/arch/sparc/kernel/sys_sparc_64.c
5334+++ b/arch/sparc/kernel/sys_sparc_64.c
5335@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5336 /* We do not accept a shared mapping if it would violate
5337 * cache aliasing constraints.
5338 */
5339- if ((flags & MAP_SHARED) &&
5340+ if ((filp || (flags & MAP_SHARED)) &&
5341 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5342 return -EINVAL;
5343 return addr;
5344@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5345 if (filp || (flags & MAP_SHARED))
5346 do_color_align = 1;
5347
5348+#ifdef CONFIG_PAX_RANDMMAP
5349+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5350+#endif
5351+
5352 if (addr) {
5353 if (do_color_align)
5354 addr = COLOUR_ALIGN(addr, pgoff);
5355@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5356 addr = PAGE_ALIGN(addr);
5357
5358 vma = find_vma(mm, addr);
5359- if (task_size - len >= addr &&
5360- (!vma || addr + len <= vma->vm_start))
5361+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5362 return addr;
5363 }
5364
5365 if (len > mm->cached_hole_size) {
5366- start_addr = addr = mm->free_area_cache;
5367+ start_addr = addr = mm->free_area_cache;
5368 } else {
5369- start_addr = addr = TASK_UNMAPPED_BASE;
5370+ start_addr = addr = mm->mmap_base;
5371 mm->cached_hole_size = 0;
5372 }
5373
5374@@ -174,14 +177,14 @@ full_search:
5375 vma = find_vma(mm, VA_EXCLUDE_END);
5376 }
5377 if (unlikely(task_size < addr)) {
5378- if (start_addr != TASK_UNMAPPED_BASE) {
5379- start_addr = addr = TASK_UNMAPPED_BASE;
5380+ if (start_addr != mm->mmap_base) {
5381+ start_addr = addr = mm->mmap_base;
5382 mm->cached_hole_size = 0;
5383 goto full_search;
5384 }
5385 return -ENOMEM;
5386 }
5387- if (likely(!vma || addr + len <= vma->vm_start)) {
5388+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5389 /*
5390 * Remember the place where we stopped the search:
5391 */
5392@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5393 /* We do not accept a shared mapping if it would violate
5394 * cache aliasing constraints.
5395 */
5396- if ((flags & MAP_SHARED) &&
5397+ if ((filp || (flags & MAP_SHARED)) &&
5398 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5399 return -EINVAL;
5400 return addr;
5401@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5402 addr = PAGE_ALIGN(addr);
5403
5404 vma = find_vma(mm, addr);
5405- if (task_size - len >= addr &&
5406- (!vma || addr + len <= vma->vm_start))
5407+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5408 return addr;
5409 }
5410
5411@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5412 /* make sure it can fit in the remaining address space */
5413 if (likely(addr > len)) {
5414 vma = find_vma(mm, addr-len);
5415- if (!vma || addr <= vma->vm_start) {
5416+ if (check_heap_stack_gap(vma, addr - len, len)) {
5417 /* remember the address as a hint for next time */
5418 return (mm->free_area_cache = addr-len);
5419 }
5420@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5421 if (unlikely(mm->mmap_base < len))
5422 goto bottomup;
5423
5424- addr = mm->mmap_base-len;
5425- if (do_color_align)
5426- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5427+ addr = mm->mmap_base - len;
5428
5429 do {
5430+ if (do_color_align)
5431+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5432 /*
5433 * Lookup failure means no vma is above this address,
5434 * else if new region fits below vma->vm_start,
5435 * return with success:
5436 */
5437 vma = find_vma(mm, addr);
5438- if (likely(!vma || addr+len <= vma->vm_start)) {
5439+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5440 /* remember the address as a hint for next time */
5441 return (mm->free_area_cache = addr);
5442 }
5443@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5444 mm->cached_hole_size = vma->vm_start - addr;
5445
5446 /* try just below the current vma->vm_start */
5447- addr = vma->vm_start-len;
5448- if (do_color_align)
5449- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5450- } while (likely(len < vma->vm_start));
5451+ addr = skip_heap_stack_gap(vma, len);
5452+ } while (!IS_ERR_VALUE(addr));
5453
5454 bottomup:
5455 /*
5456@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5457 gap == RLIM_INFINITY ||
5458 sysctl_legacy_va_layout) {
5459 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5460+
5461+#ifdef CONFIG_PAX_RANDMMAP
5462+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5463+ mm->mmap_base += mm->delta_mmap;
5464+#endif
5465+
5466 mm->get_unmapped_area = arch_get_unmapped_area;
5467 mm->unmap_area = arch_unmap_area;
5468 } else {
5469@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5470 gap = (task_size / 6 * 5);
5471
5472 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5473+
5474+#ifdef CONFIG_PAX_RANDMMAP
5475+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5476+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5477+#endif
5478+
5479 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5480 mm->unmap_area = arch_unmap_area_topdown;
5481 }
5482diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5483index 591f20c..0f1b925 100644
5484--- a/arch/sparc/kernel/traps_32.c
5485+++ b/arch/sparc/kernel/traps_32.c
5486@@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
5487 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5488 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5489
5490+extern void gr_handle_kernel_exploit(void);
5491+
5492 void die_if_kernel(char *str, struct pt_regs *regs)
5493 {
5494 static int die_counter;
5495@@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5496 count++ < 30 &&
5497 (((unsigned long) rw) >= PAGE_OFFSET) &&
5498 !(((unsigned long) rw) & 0x7)) {
5499- printk("Caller[%08lx]: %pS\n", rw->ins[7],
5500+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
5501 (void *) rw->ins[7]);
5502 rw = (struct reg_window32 *)rw->ins[6];
5503 }
5504 }
5505 printk("Instruction DUMP:");
5506 instruction_dump ((unsigned long *) regs->pc);
5507- if(regs->psr & PSR_PS)
5508+ if(regs->psr & PSR_PS) {
5509+ gr_handle_kernel_exploit();
5510 do_exit(SIGKILL);
5511+ }
5512 do_exit(SIGSEGV);
5513 }
5514
5515diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5516index 0cbdaa4..438e4c9 100644
5517--- a/arch/sparc/kernel/traps_64.c
5518+++ b/arch/sparc/kernel/traps_64.c
5519@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5520 i + 1,
5521 p->trapstack[i].tstate, p->trapstack[i].tpc,
5522 p->trapstack[i].tnpc, p->trapstack[i].tt);
5523- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5524+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5525 }
5526 }
5527
5528@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5529
5530 lvl -= 0x100;
5531 if (regs->tstate & TSTATE_PRIV) {
5532+
5533+#ifdef CONFIG_PAX_REFCOUNT
5534+ if (lvl == 6)
5535+ pax_report_refcount_overflow(regs);
5536+#endif
5537+
5538 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5539 die_if_kernel(buffer, regs);
5540 }
5541@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5542 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5543 {
5544 char buffer[32];
5545-
5546+
5547 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5548 0, lvl, SIGTRAP) == NOTIFY_STOP)
5549 return;
5550
5551+#ifdef CONFIG_PAX_REFCOUNT
5552+ if (lvl == 6)
5553+ pax_report_refcount_overflow(regs);
5554+#endif
5555+
5556 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5557
5558 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5559@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5560 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5561 printk("%s" "ERROR(%d): ",
5562 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5563- printk("TPC<%pS>\n", (void *) regs->tpc);
5564+ printk("TPC<%pA>\n", (void *) regs->tpc);
5565 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5566 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5567 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5568@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5569 smp_processor_id(),
5570 (type & 0x1) ? 'I' : 'D',
5571 regs->tpc);
5572- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5573+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5574 panic("Irrecoverable Cheetah+ parity error.");
5575 }
5576
5577@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5578 smp_processor_id(),
5579 (type & 0x1) ? 'I' : 'D',
5580 regs->tpc);
5581- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5582+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5583 }
5584
5585 struct sun4v_error_entry {
5586@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5587
5588 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5589 regs->tpc, tl);
5590- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5591+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5592 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5593- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5594+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5595 (void *) regs->u_regs[UREG_I7]);
5596 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5597 "pte[%lx] error[%lx]\n",
5598@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5599
5600 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5601 regs->tpc, tl);
5602- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5603+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5604 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5605- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5606+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5607 (void *) regs->u_regs[UREG_I7]);
5608 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5609 "pte[%lx] error[%lx]\n",
5610@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5611 fp = (unsigned long)sf->fp + STACK_BIAS;
5612 }
5613
5614- printk(" [%016lx] %pS\n", pc, (void *) pc);
5615+ printk(" [%016lx] %pA\n", pc, (void *) pc);
5616 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5617 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
5618 int index = tsk->curr_ret_stack;
5619 if (tsk->ret_stack && index >= graph) {
5620 pc = tsk->ret_stack[index - graph].ret;
5621- printk(" [%016lx] %pS\n", pc, (void *) pc);
5622+ printk(" [%016lx] %pA\n", pc, (void *) pc);
5623 graph++;
5624 }
5625 }
5626@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5627 return (struct reg_window *) (fp + STACK_BIAS);
5628 }
5629
5630+extern void gr_handle_kernel_exploit(void);
5631+
5632 void die_if_kernel(char *str, struct pt_regs *regs)
5633 {
5634 static int die_counter;
5635@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5636 while (rw &&
5637 count++ < 30 &&
5638 kstack_valid(tp, (unsigned long) rw)) {
5639- printk("Caller[%016lx]: %pS\n", rw->ins[7],
5640+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
5641 (void *) rw->ins[7]);
5642
5643 rw = kernel_stack_up(rw);
5644@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5645 }
5646 user_instruction_dump ((unsigned int __user *) regs->tpc);
5647 }
5648- if (regs->tstate & TSTATE_PRIV)
5649+ if (regs->tstate & TSTATE_PRIV) {
5650+ gr_handle_kernel_exploit();
5651 do_exit(SIGKILL);
5652+ }
5653 do_exit(SIGSEGV);
5654 }
5655 EXPORT_SYMBOL(die_if_kernel);
5656diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5657index 76e4ac1..78f8bb1 100644
5658--- a/arch/sparc/kernel/unaligned_64.c
5659+++ b/arch/sparc/kernel/unaligned_64.c
5660@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
5661 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
5662
5663 if (__ratelimit(&ratelimit)) {
5664- printk("Kernel unaligned access at TPC[%lx] %pS\n",
5665+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
5666 regs->tpc, (void *) regs->tpc);
5667 }
5668 }
5669diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5670index a3fc437..fea9957 100644
5671--- a/arch/sparc/lib/Makefile
5672+++ b/arch/sparc/lib/Makefile
5673@@ -2,7 +2,7 @@
5674 #
5675
5676 asflags-y := -ansi -DST_DIV0=0x02
5677-ccflags-y := -Werror
5678+#ccflags-y := -Werror
5679
5680 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5681 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5682diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5683index 59186e0..f747d7a 100644
5684--- a/arch/sparc/lib/atomic_64.S
5685+++ b/arch/sparc/lib/atomic_64.S
5686@@ -18,7 +18,12 @@
5687 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5688 BACKOFF_SETUP(%o2)
5689 1: lduw [%o1], %g1
5690- add %g1, %o0, %g7
5691+ addcc %g1, %o0, %g7
5692+
5693+#ifdef CONFIG_PAX_REFCOUNT
5694+ tvs %icc, 6
5695+#endif
5696+
5697 cas [%o1], %g1, %g7
5698 cmp %g1, %g7
5699 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5700@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5701 2: BACKOFF_SPIN(%o2, %o3, 1b)
5702 .size atomic_add, .-atomic_add
5703
5704+ .globl atomic_add_unchecked
5705+ .type atomic_add_unchecked,#function
5706+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5707+ BACKOFF_SETUP(%o2)
5708+1: lduw [%o1], %g1
5709+ add %g1, %o0, %g7
5710+ cas [%o1], %g1, %g7
5711+ cmp %g1, %g7
5712+ bne,pn %icc, 2f
5713+ nop
5714+ retl
5715+ nop
5716+2: BACKOFF_SPIN(%o2, %o3, 1b)
5717+ .size atomic_add_unchecked, .-atomic_add_unchecked
5718+
5719 .globl atomic_sub
5720 .type atomic_sub,#function
5721 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5722 BACKOFF_SETUP(%o2)
5723 1: lduw [%o1], %g1
5724- sub %g1, %o0, %g7
5725+ subcc %g1, %o0, %g7
5726+
5727+#ifdef CONFIG_PAX_REFCOUNT
5728+ tvs %icc, 6
5729+#endif
5730+
5731 cas [%o1], %g1, %g7
5732 cmp %g1, %g7
5733 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5734@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5735 2: BACKOFF_SPIN(%o2, %o3, 1b)
5736 .size atomic_sub, .-atomic_sub
5737
5738+ .globl atomic_sub_unchecked
5739+ .type atomic_sub_unchecked,#function
5740+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5741+ BACKOFF_SETUP(%o2)
5742+1: lduw [%o1], %g1
5743+ sub %g1, %o0, %g7
5744+ cas [%o1], %g1, %g7
5745+ cmp %g1, %g7
5746+ bne,pn %icc, 2f
5747+ nop
5748+ retl
5749+ nop
5750+2: BACKOFF_SPIN(%o2, %o3, 1b)
5751+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
5752+
5753 .globl atomic_add_ret
5754 .type atomic_add_ret,#function
5755 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5756 BACKOFF_SETUP(%o2)
5757 1: lduw [%o1], %g1
5758- add %g1, %o0, %g7
5759+ addcc %g1, %o0, %g7
5760+
5761+#ifdef CONFIG_PAX_REFCOUNT
5762+ tvs %icc, 6
5763+#endif
5764+
5765 cas [%o1], %g1, %g7
5766 cmp %g1, %g7
5767 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5768@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5769 2: BACKOFF_SPIN(%o2, %o3, 1b)
5770 .size atomic_add_ret, .-atomic_add_ret
5771
5772+ .globl atomic_add_ret_unchecked
5773+ .type atomic_add_ret_unchecked,#function
5774+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5775+ BACKOFF_SETUP(%o2)
5776+1: lduw [%o1], %g1
5777+ addcc %g1, %o0, %g7
5778+ cas [%o1], %g1, %g7
5779+ cmp %g1, %g7
5780+ bne,pn %icc, 2f
5781+ add %g7, %o0, %g7
5782+ sra %g7, 0, %o0
5783+ retl
5784+ nop
5785+2: BACKOFF_SPIN(%o2, %o3, 1b)
5786+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5787+
5788 .globl atomic_sub_ret
5789 .type atomic_sub_ret,#function
5790 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5791 BACKOFF_SETUP(%o2)
5792 1: lduw [%o1], %g1
5793- sub %g1, %o0, %g7
5794+ subcc %g1, %o0, %g7
5795+
5796+#ifdef CONFIG_PAX_REFCOUNT
5797+ tvs %icc, 6
5798+#endif
5799+
5800 cas [%o1], %g1, %g7
5801 cmp %g1, %g7
5802 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5803@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5804 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5805 BACKOFF_SETUP(%o2)
5806 1: ldx [%o1], %g1
5807- add %g1, %o0, %g7
5808+ addcc %g1, %o0, %g7
5809+
5810+#ifdef CONFIG_PAX_REFCOUNT
5811+ tvs %xcc, 6
5812+#endif
5813+
5814 casx [%o1], %g1, %g7
5815 cmp %g1, %g7
5816 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5817@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5818 2: BACKOFF_SPIN(%o2, %o3, 1b)
5819 .size atomic64_add, .-atomic64_add
5820
5821+ .globl atomic64_add_unchecked
5822+ .type atomic64_add_unchecked,#function
5823+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5824+ BACKOFF_SETUP(%o2)
5825+1: ldx [%o1], %g1
5826+ addcc %g1, %o0, %g7
5827+ casx [%o1], %g1, %g7
5828+ cmp %g1, %g7
5829+ bne,pn %xcc, 2f
5830+ nop
5831+ retl
5832+ nop
5833+2: BACKOFF_SPIN(%o2, %o3, 1b)
5834+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
5835+
5836 .globl atomic64_sub
5837 .type atomic64_sub,#function
5838 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5839 BACKOFF_SETUP(%o2)
5840 1: ldx [%o1], %g1
5841- sub %g1, %o0, %g7
5842+ subcc %g1, %o0, %g7
5843+
5844+#ifdef CONFIG_PAX_REFCOUNT
5845+ tvs %xcc, 6
5846+#endif
5847+
5848 casx [%o1], %g1, %g7
5849 cmp %g1, %g7
5850 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5851@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5852 2: BACKOFF_SPIN(%o2, %o3, 1b)
5853 .size atomic64_sub, .-atomic64_sub
5854
5855+ .globl atomic64_sub_unchecked
5856+ .type atomic64_sub_unchecked,#function
5857+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5858+ BACKOFF_SETUP(%o2)
5859+1: ldx [%o1], %g1
5860+ subcc %g1, %o0, %g7
5861+ casx [%o1], %g1, %g7
5862+ cmp %g1, %g7
5863+ bne,pn %xcc, 2f
5864+ nop
5865+ retl
5866+ nop
5867+2: BACKOFF_SPIN(%o2, %o3, 1b)
5868+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5869+
5870 .globl atomic64_add_ret
5871 .type atomic64_add_ret,#function
5872 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5873 BACKOFF_SETUP(%o2)
5874 1: ldx [%o1], %g1
5875- add %g1, %o0, %g7
5876+ addcc %g1, %o0, %g7
5877+
5878+#ifdef CONFIG_PAX_REFCOUNT
5879+ tvs %xcc, 6
5880+#endif
5881+
5882 casx [%o1], %g1, %g7
5883 cmp %g1, %g7
5884 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5885@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5886 2: BACKOFF_SPIN(%o2, %o3, 1b)
5887 .size atomic64_add_ret, .-atomic64_add_ret
5888
5889+ .globl atomic64_add_ret_unchecked
5890+ .type atomic64_add_ret_unchecked,#function
5891+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5892+ BACKOFF_SETUP(%o2)
5893+1: ldx [%o1], %g1
5894+ addcc %g1, %o0, %g7
5895+ casx [%o1], %g1, %g7
5896+ cmp %g1, %g7
5897+ bne,pn %xcc, 2f
5898+ add %g7, %o0, %g7
5899+ mov %g7, %o0
5900+ retl
5901+ nop
5902+2: BACKOFF_SPIN(%o2, %o3, 1b)
5903+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5904+
5905 .globl atomic64_sub_ret
5906 .type atomic64_sub_ret,#function
5907 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5908 BACKOFF_SETUP(%o2)
5909 1: ldx [%o1], %g1
5910- sub %g1, %o0, %g7
5911+ subcc %g1, %o0, %g7
5912+
5913+#ifdef CONFIG_PAX_REFCOUNT
5914+ tvs %xcc, 6
5915+#endif
5916+
5917 casx [%o1], %g1, %g7
5918 cmp %g1, %g7
5919 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5920diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5921index f73c224..662af10 100644
5922--- a/arch/sparc/lib/ksyms.c
5923+++ b/arch/sparc/lib/ksyms.c
5924@@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
5925
5926 /* Atomic counter implementation. */
5927 EXPORT_SYMBOL(atomic_add);
5928+EXPORT_SYMBOL(atomic_add_unchecked);
5929 EXPORT_SYMBOL(atomic_add_ret);
5930+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5931 EXPORT_SYMBOL(atomic_sub);
5932+EXPORT_SYMBOL(atomic_sub_unchecked);
5933 EXPORT_SYMBOL(atomic_sub_ret);
5934 EXPORT_SYMBOL(atomic64_add);
5935+EXPORT_SYMBOL(atomic64_add_unchecked);
5936 EXPORT_SYMBOL(atomic64_add_ret);
5937+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5938 EXPORT_SYMBOL(atomic64_sub);
5939+EXPORT_SYMBOL(atomic64_sub_unchecked);
5940 EXPORT_SYMBOL(atomic64_sub_ret);
5941
5942 /* Atomic bit operations. */
5943diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5944index 301421c..e2535d1 100644
5945--- a/arch/sparc/mm/Makefile
5946+++ b/arch/sparc/mm/Makefile
5947@@ -2,7 +2,7 @@
5948 #
5949
5950 asflags-y := -ansi
5951-ccflags-y := -Werror
5952+#ccflags-y := -Werror
5953
5954 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
5955 obj-y += fault_$(BITS).o
5956diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5957index 8023fd7..c8e89e9 100644
5958--- a/arch/sparc/mm/fault_32.c
5959+++ b/arch/sparc/mm/fault_32.c
5960@@ -21,6 +21,9 @@
5961 #include <linux/perf_event.h>
5962 #include <linux/interrupt.h>
5963 #include <linux/kdebug.h>
5964+#include <linux/slab.h>
5965+#include <linux/pagemap.h>
5966+#include <linux/compiler.h>
5967
5968 #include <asm/system.h>
5969 #include <asm/page.h>
5970@@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5971 return safe_compute_effective_address(regs, insn);
5972 }
5973
5974+#ifdef CONFIG_PAX_PAGEEXEC
5975+#ifdef CONFIG_PAX_DLRESOLVE
5976+static void pax_emuplt_close(struct vm_area_struct *vma)
5977+{
5978+ vma->vm_mm->call_dl_resolve = 0UL;
5979+}
5980+
5981+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5982+{
5983+ unsigned int *kaddr;
5984+
5985+ vmf->page = alloc_page(GFP_HIGHUSER);
5986+ if (!vmf->page)
5987+ return VM_FAULT_OOM;
5988+
5989+ kaddr = kmap(vmf->page);
5990+ memset(kaddr, 0, PAGE_SIZE);
5991+ kaddr[0] = 0x9DE3BFA8U; /* save */
5992+ flush_dcache_page(vmf->page);
5993+ kunmap(vmf->page);
5994+ return VM_FAULT_MAJOR;
5995+}
5996+
5997+static const struct vm_operations_struct pax_vm_ops = {
5998+ .close = pax_emuplt_close,
5999+ .fault = pax_emuplt_fault
6000+};
6001+
6002+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6003+{
6004+ int ret;
6005+
6006+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6007+ vma->vm_mm = current->mm;
6008+ vma->vm_start = addr;
6009+ vma->vm_end = addr + PAGE_SIZE;
6010+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6011+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6012+ vma->vm_ops = &pax_vm_ops;
6013+
6014+ ret = insert_vm_struct(current->mm, vma);
6015+ if (ret)
6016+ return ret;
6017+
6018+ ++current->mm->total_vm;
6019+ return 0;
6020+}
6021+#endif
6022+
6023+/*
6024+ * PaX: decide what to do with offenders (regs->pc = fault address)
6025+ *
6026+ * returns 1 when task should be killed
6027+ * 2 when patched PLT trampoline was detected
6028+ * 3 when unpatched PLT trampoline was detected
6029+ */
6030+static int pax_handle_fetch_fault(struct pt_regs *regs)
6031+{
6032+
6033+#ifdef CONFIG_PAX_EMUPLT
6034+ int err;
6035+
6036+ do { /* PaX: patched PLT emulation #1 */
6037+ unsigned int sethi1, sethi2, jmpl;
6038+
6039+ err = get_user(sethi1, (unsigned int *)regs->pc);
6040+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6041+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6042+
6043+ if (err)
6044+ break;
6045+
6046+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6047+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6048+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6049+ {
6050+ unsigned int addr;
6051+
6052+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6053+ addr = regs->u_regs[UREG_G1];
6054+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6055+ regs->pc = addr;
6056+ regs->npc = addr+4;
6057+ return 2;
6058+ }
6059+ } while (0);
6060+
6061+ { /* PaX: patched PLT emulation #2 */
6062+ unsigned int ba;
6063+
6064+ err = get_user(ba, (unsigned int *)regs->pc);
6065+
6066+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6067+ unsigned int addr;
6068+
6069+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6070+ regs->pc = addr;
6071+ regs->npc = addr+4;
6072+ return 2;
6073+ }
6074+ }
6075+
6076+ do { /* PaX: patched PLT emulation #3 */
6077+ unsigned int sethi, jmpl, nop;
6078+
6079+ err = get_user(sethi, (unsigned int *)regs->pc);
6080+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6081+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6082+
6083+ if (err)
6084+ break;
6085+
6086+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6087+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6088+ nop == 0x01000000U)
6089+ {
6090+ unsigned int addr;
6091+
6092+ addr = (sethi & 0x003FFFFFU) << 10;
6093+ regs->u_regs[UREG_G1] = addr;
6094+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6095+ regs->pc = addr;
6096+ regs->npc = addr+4;
6097+ return 2;
6098+ }
6099+ } while (0);
6100+
6101+ do { /* PaX: unpatched PLT emulation step 1 */
6102+ unsigned int sethi, ba, nop;
6103+
6104+ err = get_user(sethi, (unsigned int *)regs->pc);
6105+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
6106+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6107+
6108+ if (err)
6109+ break;
6110+
6111+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6112+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6113+ nop == 0x01000000U)
6114+ {
6115+ unsigned int addr, save, call;
6116+
6117+ if ((ba & 0xFFC00000U) == 0x30800000U)
6118+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6119+ else
6120+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6121+
6122+ err = get_user(save, (unsigned int *)addr);
6123+ err |= get_user(call, (unsigned int *)(addr+4));
6124+ err |= get_user(nop, (unsigned int *)(addr+8));
6125+ if (err)
6126+ break;
6127+
6128+#ifdef CONFIG_PAX_DLRESOLVE
6129+ if (save == 0x9DE3BFA8U &&
6130+ (call & 0xC0000000U) == 0x40000000U &&
6131+ nop == 0x01000000U)
6132+ {
6133+ struct vm_area_struct *vma;
6134+ unsigned long call_dl_resolve;
6135+
6136+ down_read(&current->mm->mmap_sem);
6137+ call_dl_resolve = current->mm->call_dl_resolve;
6138+ up_read(&current->mm->mmap_sem);
6139+ if (likely(call_dl_resolve))
6140+ goto emulate;
6141+
6142+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6143+
6144+ down_write(&current->mm->mmap_sem);
6145+ if (current->mm->call_dl_resolve) {
6146+ call_dl_resolve = current->mm->call_dl_resolve;
6147+ up_write(&current->mm->mmap_sem);
6148+ if (vma)
6149+ kmem_cache_free(vm_area_cachep, vma);
6150+ goto emulate;
6151+ }
6152+
6153+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6154+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6155+ up_write(&current->mm->mmap_sem);
6156+ if (vma)
6157+ kmem_cache_free(vm_area_cachep, vma);
6158+ return 1;
6159+ }
6160+
6161+ if (pax_insert_vma(vma, call_dl_resolve)) {
6162+ up_write(&current->mm->mmap_sem);
6163+ kmem_cache_free(vm_area_cachep, vma);
6164+ return 1;
6165+ }
6166+
6167+ current->mm->call_dl_resolve = call_dl_resolve;
6168+ up_write(&current->mm->mmap_sem);
6169+
6170+emulate:
6171+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6172+ regs->pc = call_dl_resolve;
6173+ regs->npc = addr+4;
6174+ return 3;
6175+ }
6176+#endif
6177+
6178+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6179+ if ((save & 0xFFC00000U) == 0x05000000U &&
6180+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6181+ nop == 0x01000000U)
6182+ {
6183+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6184+ regs->u_regs[UREG_G2] = addr + 4;
6185+ addr = (save & 0x003FFFFFU) << 10;
6186+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6187+ regs->pc = addr;
6188+ regs->npc = addr+4;
6189+ return 3;
6190+ }
6191+ }
6192+ } while (0);
6193+
6194+ do { /* PaX: unpatched PLT emulation step 2 */
6195+ unsigned int save, call, nop;
6196+
6197+ err = get_user(save, (unsigned int *)(regs->pc-4));
6198+ err |= get_user(call, (unsigned int *)regs->pc);
6199+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6200+ if (err)
6201+ break;
6202+
6203+ if (save == 0x9DE3BFA8U &&
6204+ (call & 0xC0000000U) == 0x40000000U &&
6205+ nop == 0x01000000U)
6206+ {
6207+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6208+
6209+ regs->u_regs[UREG_RETPC] = regs->pc;
6210+ regs->pc = dl_resolve;
6211+ regs->npc = dl_resolve+4;
6212+ return 3;
6213+ }
6214+ } while (0);
6215+#endif
6216+
6217+ return 1;
6218+}
6219+
6220+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6221+{
6222+ unsigned long i;
6223+
6224+ printk(KERN_ERR "PAX: bytes at PC: ");
6225+ for (i = 0; i < 8; i++) {
6226+ unsigned int c;
6227+ if (get_user(c, (unsigned int *)pc+i))
6228+ printk(KERN_CONT "???????? ");
6229+ else
6230+ printk(KERN_CONT "%08x ", c);
6231+ }
6232+ printk("\n");
6233+}
6234+#endif
6235+
6236 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6237 int text_fault)
6238 {
6239@@ -280,6 +545,24 @@ good_area:
6240 if(!(vma->vm_flags & VM_WRITE))
6241 goto bad_area;
6242 } else {
6243+
6244+#ifdef CONFIG_PAX_PAGEEXEC
6245+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6246+ up_read(&mm->mmap_sem);
6247+ switch (pax_handle_fetch_fault(regs)) {
6248+
6249+#ifdef CONFIG_PAX_EMUPLT
6250+ case 2:
6251+ case 3:
6252+ return;
6253+#endif
6254+
6255+ }
6256+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6257+ do_group_exit(SIGKILL);
6258+ }
6259+#endif
6260+
6261 /* Allow reads even for write-only mappings */
6262 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6263 goto bad_area;
6264diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6265index 504c062..6fcb9c6 100644
6266--- a/arch/sparc/mm/fault_64.c
6267+++ b/arch/sparc/mm/fault_64.c
6268@@ -21,6 +21,9 @@
6269 #include <linux/kprobes.h>
6270 #include <linux/kdebug.h>
6271 #include <linux/percpu.h>
6272+#include <linux/slab.h>
6273+#include <linux/pagemap.h>
6274+#include <linux/compiler.h>
6275
6276 #include <asm/page.h>
6277 #include <asm/pgtable.h>
6278@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6279 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6280 regs->tpc);
6281 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6282- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6283+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6284 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6285 dump_stack();
6286 unhandled_fault(regs->tpc, current, regs);
6287@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6288 show_regs(regs);
6289 }
6290
6291+#ifdef CONFIG_PAX_PAGEEXEC
6292+#ifdef CONFIG_PAX_DLRESOLVE
6293+static void pax_emuplt_close(struct vm_area_struct *vma)
6294+{
6295+ vma->vm_mm->call_dl_resolve = 0UL;
6296+}
6297+
6298+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6299+{
6300+ unsigned int *kaddr;
6301+
6302+ vmf->page = alloc_page(GFP_HIGHUSER);
6303+ if (!vmf->page)
6304+ return VM_FAULT_OOM;
6305+
6306+ kaddr = kmap(vmf->page);
6307+ memset(kaddr, 0, PAGE_SIZE);
6308+ kaddr[0] = 0x9DE3BFA8U; /* save */
6309+ flush_dcache_page(vmf->page);
6310+ kunmap(vmf->page);
6311+ return VM_FAULT_MAJOR;
6312+}
6313+
6314+static const struct vm_operations_struct pax_vm_ops = {
6315+ .close = pax_emuplt_close,
6316+ .fault = pax_emuplt_fault
6317+};
6318+
6319+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6320+{
6321+ int ret;
6322+
6323+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6324+ vma->vm_mm = current->mm;
6325+ vma->vm_start = addr;
6326+ vma->vm_end = addr + PAGE_SIZE;
6327+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6328+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6329+ vma->vm_ops = &pax_vm_ops;
6330+
6331+ ret = insert_vm_struct(current->mm, vma);
6332+ if (ret)
6333+ return ret;
6334+
6335+ ++current->mm->total_vm;
6336+ return 0;
6337+}
6338+#endif
6339+
6340+/*
6341+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6342+ *
6343+ * returns 1 when task should be killed
6344+ * 2 when patched PLT trampoline was detected
6345+ * 3 when unpatched PLT trampoline was detected
6346+ */
6347+static int pax_handle_fetch_fault(struct pt_regs *regs)
6348+{
6349+
6350+#ifdef CONFIG_PAX_EMUPLT
6351+ int err;
6352+
6353+ do { /* PaX: patched PLT emulation #1 */
6354+ unsigned int sethi1, sethi2, jmpl;
6355+
6356+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6357+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6358+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6359+
6360+ if (err)
6361+ break;
6362+
6363+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6364+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6365+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6366+ {
6367+ unsigned long addr;
6368+
6369+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6370+ addr = regs->u_regs[UREG_G1];
6371+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6372+
6373+ if (test_thread_flag(TIF_32BIT))
6374+ addr &= 0xFFFFFFFFUL;
6375+
6376+ regs->tpc = addr;
6377+ regs->tnpc = addr+4;
6378+ return 2;
6379+ }
6380+ } while (0);
6381+
6382+ { /* PaX: patched PLT emulation #2 */
6383+ unsigned int ba;
6384+
6385+ err = get_user(ba, (unsigned int *)regs->tpc);
6386+
6387+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6388+ unsigned long addr;
6389+
6390+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6391+
6392+ if (test_thread_flag(TIF_32BIT))
6393+ addr &= 0xFFFFFFFFUL;
6394+
6395+ regs->tpc = addr;
6396+ regs->tnpc = addr+4;
6397+ return 2;
6398+ }
6399+ }
6400+
6401+ do { /* PaX: patched PLT emulation #3 */
6402+ unsigned int sethi, jmpl, nop;
6403+
6404+ err = get_user(sethi, (unsigned int *)regs->tpc);
6405+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6406+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6407+
6408+ if (err)
6409+ break;
6410+
6411+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6412+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6413+ nop == 0x01000000U)
6414+ {
6415+ unsigned long addr;
6416+
6417+ addr = (sethi & 0x003FFFFFU) << 10;
6418+ regs->u_regs[UREG_G1] = addr;
6419+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6420+
6421+ if (test_thread_flag(TIF_32BIT))
6422+ addr &= 0xFFFFFFFFUL;
6423+
6424+ regs->tpc = addr;
6425+ regs->tnpc = addr+4;
6426+ return 2;
6427+ }
6428+ } while (0);
6429+
6430+ do { /* PaX: patched PLT emulation #4 */
6431+ unsigned int sethi, mov1, call, mov2;
6432+
6433+ err = get_user(sethi, (unsigned int *)regs->tpc);
6434+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6435+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
6436+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6437+
6438+ if (err)
6439+ break;
6440+
6441+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6442+ mov1 == 0x8210000FU &&
6443+ (call & 0xC0000000U) == 0x40000000U &&
6444+ mov2 == 0x9E100001U)
6445+ {
6446+ unsigned long addr;
6447+
6448+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6449+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6450+
6451+ if (test_thread_flag(TIF_32BIT))
6452+ addr &= 0xFFFFFFFFUL;
6453+
6454+ regs->tpc = addr;
6455+ regs->tnpc = addr+4;
6456+ return 2;
6457+ }
6458+ } while (0);
6459+
6460+ do { /* PaX: patched PLT emulation #5 */
6461+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6462+
6463+ err = get_user(sethi, (unsigned int *)regs->tpc);
6464+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6465+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6466+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6467+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6468+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6469+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6470+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6471+
6472+ if (err)
6473+ break;
6474+
6475+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6476+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6477+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6478+ (or1 & 0xFFFFE000U) == 0x82106000U &&
6479+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6480+ sllx == 0x83287020U &&
6481+ jmpl == 0x81C04005U &&
6482+ nop == 0x01000000U)
6483+ {
6484+ unsigned long addr;
6485+
6486+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6487+ regs->u_regs[UREG_G1] <<= 32;
6488+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6489+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6490+ regs->tpc = addr;
6491+ regs->tnpc = addr+4;
6492+ return 2;
6493+ }
6494+ } while (0);
6495+
6496+ do { /* PaX: patched PLT emulation #6 */
6497+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6498+
6499+ err = get_user(sethi, (unsigned int *)regs->tpc);
6500+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6501+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6502+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6503+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
6504+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6505+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6506+
6507+ if (err)
6508+ break;
6509+
6510+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6511+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6512+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6513+ sllx == 0x83287020U &&
6514+ (or & 0xFFFFE000U) == 0x8A116000U &&
6515+ jmpl == 0x81C04005U &&
6516+ nop == 0x01000000U)
6517+ {
6518+ unsigned long addr;
6519+
6520+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6521+ regs->u_regs[UREG_G1] <<= 32;
6522+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6523+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6524+ regs->tpc = addr;
6525+ regs->tnpc = addr+4;
6526+ return 2;
6527+ }
6528+ } while (0);
6529+
6530+ do { /* PaX: unpatched PLT emulation step 1 */
6531+ unsigned int sethi, ba, nop;
6532+
6533+ err = get_user(sethi, (unsigned int *)regs->tpc);
6534+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6535+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6536+
6537+ if (err)
6538+ break;
6539+
6540+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6541+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6542+ nop == 0x01000000U)
6543+ {
6544+ unsigned long addr;
6545+ unsigned int save, call;
6546+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6547+
6548+ if ((ba & 0xFFC00000U) == 0x30800000U)
6549+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6550+ else
6551+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6552+
6553+ if (test_thread_flag(TIF_32BIT))
6554+ addr &= 0xFFFFFFFFUL;
6555+
6556+ err = get_user(save, (unsigned int *)addr);
6557+ err |= get_user(call, (unsigned int *)(addr+4));
6558+ err |= get_user(nop, (unsigned int *)(addr+8));
6559+ if (err)
6560+ break;
6561+
6562+#ifdef CONFIG_PAX_DLRESOLVE
6563+ if (save == 0x9DE3BFA8U &&
6564+ (call & 0xC0000000U) == 0x40000000U &&
6565+ nop == 0x01000000U)
6566+ {
6567+ struct vm_area_struct *vma;
6568+ unsigned long call_dl_resolve;
6569+
6570+ down_read(&current->mm->mmap_sem);
6571+ call_dl_resolve = current->mm->call_dl_resolve;
6572+ up_read(&current->mm->mmap_sem);
6573+ if (likely(call_dl_resolve))
6574+ goto emulate;
6575+
6576+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6577+
6578+ down_write(&current->mm->mmap_sem);
6579+ if (current->mm->call_dl_resolve) {
6580+ call_dl_resolve = current->mm->call_dl_resolve;
6581+ up_write(&current->mm->mmap_sem);
6582+ if (vma)
6583+ kmem_cache_free(vm_area_cachep, vma);
6584+ goto emulate;
6585+ }
6586+
6587+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6588+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6589+ up_write(&current->mm->mmap_sem);
6590+ if (vma)
6591+ kmem_cache_free(vm_area_cachep, vma);
6592+ return 1;
6593+ }
6594+
6595+ if (pax_insert_vma(vma, call_dl_resolve)) {
6596+ up_write(&current->mm->mmap_sem);
6597+ kmem_cache_free(vm_area_cachep, vma);
6598+ return 1;
6599+ }
6600+
6601+ current->mm->call_dl_resolve = call_dl_resolve;
6602+ up_write(&current->mm->mmap_sem);
6603+
6604+emulate:
6605+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6606+ regs->tpc = call_dl_resolve;
6607+ regs->tnpc = addr+4;
6608+ return 3;
6609+ }
6610+#endif
6611+
6612+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6613+ if ((save & 0xFFC00000U) == 0x05000000U &&
6614+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6615+ nop == 0x01000000U)
6616+ {
6617+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6618+ regs->u_regs[UREG_G2] = addr + 4;
6619+ addr = (save & 0x003FFFFFU) << 10;
6620+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6621+
6622+ if (test_thread_flag(TIF_32BIT))
6623+ addr &= 0xFFFFFFFFUL;
6624+
6625+ regs->tpc = addr;
6626+ regs->tnpc = addr+4;
6627+ return 3;
6628+ }
6629+
6630+ /* PaX: 64-bit PLT stub */
6631+ err = get_user(sethi1, (unsigned int *)addr);
6632+ err |= get_user(sethi2, (unsigned int *)(addr+4));
6633+ err |= get_user(or1, (unsigned int *)(addr+8));
6634+ err |= get_user(or2, (unsigned int *)(addr+12));
6635+ err |= get_user(sllx, (unsigned int *)(addr+16));
6636+ err |= get_user(add, (unsigned int *)(addr+20));
6637+ err |= get_user(jmpl, (unsigned int *)(addr+24));
6638+ err |= get_user(nop, (unsigned int *)(addr+28));
6639+ if (err)
6640+ break;
6641+
6642+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6643+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6644+ (or1 & 0xFFFFE000U) == 0x88112000U &&
6645+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6646+ sllx == 0x89293020U &&
6647+ add == 0x8A010005U &&
6648+ jmpl == 0x89C14000U &&
6649+ nop == 0x01000000U)
6650+ {
6651+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6652+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6653+ regs->u_regs[UREG_G4] <<= 32;
6654+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6655+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6656+ regs->u_regs[UREG_G4] = addr + 24;
6657+ addr = regs->u_regs[UREG_G5];
6658+ regs->tpc = addr;
6659+ regs->tnpc = addr+4;
6660+ return 3;
6661+ }
6662+ }
6663+ } while (0);
6664+
6665+#ifdef CONFIG_PAX_DLRESOLVE
6666+ do { /* PaX: unpatched PLT emulation step 2 */
6667+ unsigned int save, call, nop;
6668+
6669+ err = get_user(save, (unsigned int *)(regs->tpc-4));
6670+ err |= get_user(call, (unsigned int *)regs->tpc);
6671+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6672+ if (err)
6673+ break;
6674+
6675+ if (save == 0x9DE3BFA8U &&
6676+ (call & 0xC0000000U) == 0x40000000U &&
6677+ nop == 0x01000000U)
6678+ {
6679+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6680+
6681+ if (test_thread_flag(TIF_32BIT))
6682+ dl_resolve &= 0xFFFFFFFFUL;
6683+
6684+ regs->u_regs[UREG_RETPC] = regs->tpc;
6685+ regs->tpc = dl_resolve;
6686+ regs->tnpc = dl_resolve+4;
6687+ return 3;
6688+ }
6689+ } while (0);
6690+#endif
6691+
6692+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6693+ unsigned int sethi, ba, nop;
6694+
6695+ err = get_user(sethi, (unsigned int *)regs->tpc);
6696+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6697+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6698+
6699+ if (err)
6700+ break;
6701+
6702+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6703+ (ba & 0xFFF00000U) == 0x30600000U &&
6704+ nop == 0x01000000U)
6705+ {
6706+ unsigned long addr;
6707+
6708+ addr = (sethi & 0x003FFFFFU) << 10;
6709+ regs->u_regs[UREG_G1] = addr;
6710+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6711+
6712+ if (test_thread_flag(TIF_32BIT))
6713+ addr &= 0xFFFFFFFFUL;
6714+
6715+ regs->tpc = addr;
6716+ regs->tnpc = addr+4;
6717+ return 2;
6718+ }
6719+ } while (0);
6720+
6721+#endif
6722+
6723+ return 1;
6724+}
6725+
6726+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6727+{
6728+ unsigned long i;
6729+
6730+ printk(KERN_ERR "PAX: bytes at PC: ");
6731+ for (i = 0; i < 8; i++) {
6732+ unsigned int c;
6733+ if (get_user(c, (unsigned int *)pc+i))
6734+ printk(KERN_CONT "???????? ");
6735+ else
6736+ printk(KERN_CONT "%08x ", c);
6737+ }
6738+ printk("\n");
6739+}
6740+#endif
6741+
6742 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6743 {
6744 struct mm_struct *mm = current->mm;
6745@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6746 if (!vma)
6747 goto bad_area;
6748
6749+#ifdef CONFIG_PAX_PAGEEXEC
6750+ /* PaX: detect ITLB misses on non-exec pages */
6751+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6752+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6753+ {
6754+ if (address != regs->tpc)
6755+ goto good_area;
6756+
6757+ up_read(&mm->mmap_sem);
6758+ switch (pax_handle_fetch_fault(regs)) {
6759+
6760+#ifdef CONFIG_PAX_EMUPLT
6761+ case 2:
6762+ case 3:
6763+ return;
6764+#endif
6765+
6766+ }
6767+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6768+ do_group_exit(SIGKILL);
6769+ }
6770+#endif
6771+
6772 /* Pure DTLB misses do not tell us whether the fault causing
6773 * load/store/atomic was a write or not, it only says that there
6774 * was no match. So in such a case we (carefully) read the
6775diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6776index 07e1453..0a7d9e9 100644
6777--- a/arch/sparc/mm/hugetlbpage.c
6778+++ b/arch/sparc/mm/hugetlbpage.c
6779@@ -67,7 +67,7 @@ full_search:
6780 }
6781 return -ENOMEM;
6782 }
6783- if (likely(!vma || addr + len <= vma->vm_start)) {
6784+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6785 /*
6786 * Remember the place where we stopped the search:
6787 */
6788@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6789 /* make sure it can fit in the remaining address space */
6790 if (likely(addr > len)) {
6791 vma = find_vma(mm, addr-len);
6792- if (!vma || addr <= vma->vm_start) {
6793+ if (check_heap_stack_gap(vma, addr - len, len)) {
6794 /* remember the address as a hint for next time */
6795 return (mm->free_area_cache = addr-len);
6796 }
6797@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6798 if (unlikely(mm->mmap_base < len))
6799 goto bottomup;
6800
6801- addr = (mm->mmap_base-len) & HPAGE_MASK;
6802+ addr = mm->mmap_base - len;
6803
6804 do {
6805+ addr &= HPAGE_MASK;
6806 /*
6807 * Lookup failure means no vma is above this address,
6808 * else if new region fits below vma->vm_start,
6809 * return with success:
6810 */
6811 vma = find_vma(mm, addr);
6812- if (likely(!vma || addr+len <= vma->vm_start)) {
6813+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6814 /* remember the address as a hint for next time */
6815 return (mm->free_area_cache = addr);
6816 }
6817@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6818 mm->cached_hole_size = vma->vm_start - addr;
6819
6820 /* try just below the current vma->vm_start */
6821- addr = (vma->vm_start-len) & HPAGE_MASK;
6822- } while (likely(len < vma->vm_start));
6823+ addr = skip_heap_stack_gap(vma, len);
6824+ } while (!IS_ERR_VALUE(addr));
6825
6826 bottomup:
6827 /*
6828@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6829 if (addr) {
6830 addr = ALIGN(addr, HPAGE_SIZE);
6831 vma = find_vma(mm, addr);
6832- if (task_size - len >= addr &&
6833- (!vma || addr + len <= vma->vm_start))
6834+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6835 return addr;
6836 }
6837 if (mm->get_unmapped_area == arch_get_unmapped_area)
6838diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6839index 7b00de6..78239f4 100644
6840--- a/arch/sparc/mm/init_32.c
6841+++ b/arch/sparc/mm/init_32.c
6842@@ -316,6 +316,9 @@ extern void device_scan(void);
6843 pgprot_t PAGE_SHARED __read_mostly;
6844 EXPORT_SYMBOL(PAGE_SHARED);
6845
6846+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6847+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6848+
6849 void __init paging_init(void)
6850 {
6851 switch(sparc_cpu_model) {
6852@@ -344,17 +347,17 @@ void __init paging_init(void)
6853
6854 /* Initialize the protection map with non-constant, MMU dependent values. */
6855 protection_map[0] = PAGE_NONE;
6856- protection_map[1] = PAGE_READONLY;
6857- protection_map[2] = PAGE_COPY;
6858- protection_map[3] = PAGE_COPY;
6859+ protection_map[1] = PAGE_READONLY_NOEXEC;
6860+ protection_map[2] = PAGE_COPY_NOEXEC;
6861+ protection_map[3] = PAGE_COPY_NOEXEC;
6862 protection_map[4] = PAGE_READONLY;
6863 protection_map[5] = PAGE_READONLY;
6864 protection_map[6] = PAGE_COPY;
6865 protection_map[7] = PAGE_COPY;
6866 protection_map[8] = PAGE_NONE;
6867- protection_map[9] = PAGE_READONLY;
6868- protection_map[10] = PAGE_SHARED;
6869- protection_map[11] = PAGE_SHARED;
6870+ protection_map[9] = PAGE_READONLY_NOEXEC;
6871+ protection_map[10] = PAGE_SHARED_NOEXEC;
6872+ protection_map[11] = PAGE_SHARED_NOEXEC;
6873 protection_map[12] = PAGE_READONLY;
6874 protection_map[13] = PAGE_READONLY;
6875 protection_map[14] = PAGE_SHARED;
6876diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6877index cbef74e..c38fead 100644
6878--- a/arch/sparc/mm/srmmu.c
6879+++ b/arch/sparc/mm/srmmu.c
6880@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6881 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6882 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6883 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6884+
6885+#ifdef CONFIG_PAX_PAGEEXEC
6886+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6887+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6888+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6889+#endif
6890+
6891 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6892 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6893
6894diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
6895index 27fe667..36d474c 100644
6896--- a/arch/tile/include/asm/atomic_64.h
6897+++ b/arch/tile/include/asm/atomic_64.h
6898@@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
6899
6900 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6901
6902+#define atomic64_read_unchecked(v) atomic64_read(v)
6903+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6904+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6905+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6906+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6907+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6908+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6909+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6910+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6911+
6912 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
6913 #define smp_mb__before_atomic_dec() smp_mb()
6914 #define smp_mb__after_atomic_dec() smp_mb()
6915diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
6916index 392e533..536b092 100644
6917--- a/arch/tile/include/asm/cache.h
6918+++ b/arch/tile/include/asm/cache.h
6919@@ -15,11 +15,12 @@
6920 #ifndef _ASM_TILE_CACHE_H
6921 #define _ASM_TILE_CACHE_H
6922
6923+#include <linux/const.h>
6924 #include <arch/chip.h>
6925
6926 /* bytes per L1 data cache line */
6927 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
6928-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6929+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6930
6931 /* bytes per L2 cache line */
6932 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
6933diff --git a/arch/um/Makefile b/arch/um/Makefile
6934index 28688e6..4c0aa1c 100644
6935--- a/arch/um/Makefile
6936+++ b/arch/um/Makefile
6937@@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6938 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6939 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
6940
6941+ifdef CONSTIFY_PLUGIN
6942+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6943+endif
6944+
6945 #This will adjust *FLAGS accordingly to the platform.
6946 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
6947
6948diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
6949index 19e1bdd..3665b77 100644
6950--- a/arch/um/include/asm/cache.h
6951+++ b/arch/um/include/asm/cache.h
6952@@ -1,6 +1,7 @@
6953 #ifndef __UM_CACHE_H
6954 #define __UM_CACHE_H
6955
6956+#include <linux/const.h>
6957
6958 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
6959 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6960@@ -12,6 +13,6 @@
6961 # define L1_CACHE_SHIFT 5
6962 #endif
6963
6964-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6965+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6966
6967 #endif
6968diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6969index 6c03acd..a5e0215 100644
6970--- a/arch/um/include/asm/kmap_types.h
6971+++ b/arch/um/include/asm/kmap_types.h
6972@@ -23,6 +23,7 @@ enum km_type {
6973 KM_IRQ1,
6974 KM_SOFTIRQ0,
6975 KM_SOFTIRQ1,
6976+ KM_CLEARPAGE,
6977 KM_TYPE_NR
6978 };
6979
6980diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6981index 7cfc3ce..cbd1a58 100644
6982--- a/arch/um/include/asm/page.h
6983+++ b/arch/um/include/asm/page.h
6984@@ -14,6 +14,9 @@
6985 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6986 #define PAGE_MASK (~(PAGE_SIZE-1))
6987
6988+#define ktla_ktva(addr) (addr)
6989+#define ktva_ktla(addr) (addr)
6990+
6991 #ifndef __ASSEMBLY__
6992
6993 struct page;
6994diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6995index 69f2490..2634831 100644
6996--- a/arch/um/kernel/process.c
6997+++ b/arch/um/kernel/process.c
6998@@ -408,22 +408,6 @@ int singlestepping(void * t)
6999 return 2;
7000 }
7001
7002-/*
7003- * Only x86 and x86_64 have an arch_align_stack().
7004- * All other arches have "#define arch_align_stack(x) (x)"
7005- * in their asm/system.h
7006- * As this is included in UML from asm-um/system-generic.h,
7007- * we can use it to behave as the subarch does.
7008- */
7009-#ifndef arch_align_stack
7010-unsigned long arch_align_stack(unsigned long sp)
7011-{
7012- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7013- sp -= get_random_int() % 8192;
7014- return sp & ~0xf;
7015-}
7016-#endif
7017-
7018 unsigned long get_wchan(struct task_struct *p)
7019 {
7020 unsigned long stack_page, sp, ip;
7021diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7022index ad8f795..2c7eec6 100644
7023--- a/arch/unicore32/include/asm/cache.h
7024+++ b/arch/unicore32/include/asm/cache.h
7025@@ -12,8 +12,10 @@
7026 #ifndef __UNICORE_CACHE_H__
7027 #define __UNICORE_CACHE_H__
7028
7029-#define L1_CACHE_SHIFT (5)
7030-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7031+#include <linux/const.h>
7032+
7033+#define L1_CACHE_SHIFT 5
7034+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7035
7036 /*
7037 * Memory returned by kmalloc() may be used for DMA, so we must make
7038diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7039index 5bed94e..fbcf200 100644
7040--- a/arch/x86/Kconfig
7041+++ b/arch/x86/Kconfig
7042@@ -226,7 +226,7 @@ config X86_HT
7043
7044 config X86_32_LAZY_GS
7045 def_bool y
7046- depends on X86_32 && !CC_STACKPROTECTOR
7047+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7048
7049 config ARCH_HWEIGHT_CFLAGS
7050 string
7051@@ -1058,7 +1058,7 @@ choice
7052
7053 config NOHIGHMEM
7054 bool "off"
7055- depends on !X86_NUMAQ
7056+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7057 ---help---
7058 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7059 However, the address space of 32-bit x86 processors is only 4
7060@@ -1095,7 +1095,7 @@ config NOHIGHMEM
7061
7062 config HIGHMEM4G
7063 bool "4GB"
7064- depends on !X86_NUMAQ
7065+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7066 ---help---
7067 Select this if you have a 32-bit processor and between 1 and 4
7068 gigabytes of physical RAM.
7069@@ -1149,7 +1149,7 @@ config PAGE_OFFSET
7070 hex
7071 default 0xB0000000 if VMSPLIT_3G_OPT
7072 default 0x80000000 if VMSPLIT_2G
7073- default 0x78000000 if VMSPLIT_2G_OPT
7074+ default 0x70000000 if VMSPLIT_2G_OPT
7075 default 0x40000000 if VMSPLIT_1G
7076 default 0xC0000000
7077 depends on X86_32
7078@@ -1539,6 +1539,7 @@ config SECCOMP
7079
7080 config CC_STACKPROTECTOR
7081 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7082+ depends on X86_64 || !PAX_MEMORY_UDEREF
7083 ---help---
7084 This option turns on the -fstack-protector GCC feature. This
7085 feature puts, at the beginning of functions, a canary value on
7086@@ -1596,6 +1597,7 @@ config KEXEC_JUMP
7087 config PHYSICAL_START
7088 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7089 default "0x1000000"
7090+ range 0x400000 0x40000000
7091 ---help---
7092 This gives the physical address where the kernel is loaded.
7093
7094@@ -1659,6 +1661,7 @@ config X86_NEED_RELOCS
7095 config PHYSICAL_ALIGN
7096 hex "Alignment value to which kernel should be aligned" if X86_32
7097 default "0x1000000"
7098+ range 0x400000 0x1000000 if PAX_KERNEXEC
7099 range 0x2000 0x1000000
7100 ---help---
7101 This value puts the alignment restrictions on physical address
7102@@ -1690,9 +1693,10 @@ config HOTPLUG_CPU
7103 Say N if you want to disable CPU hotplug.
7104
7105 config COMPAT_VDSO
7106- def_bool y
7107+ def_bool n
7108 prompt "Compat VDSO support"
7109 depends on X86_32 || IA32_EMULATION
7110+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7111 ---help---
7112 Map the 32-bit VDSO to the predictable old-style address too.
7113
7114diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7115index 3c57033..22d44aa 100644
7116--- a/arch/x86/Kconfig.cpu
7117+++ b/arch/x86/Kconfig.cpu
7118@@ -335,7 +335,7 @@ config X86_PPRO_FENCE
7119
7120 config X86_F00F_BUG
7121 def_bool y
7122- depends on M586MMX || M586TSC || M586 || M486 || M386
7123+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7124
7125 config X86_INVD_BUG
7126 def_bool y
7127@@ -359,7 +359,7 @@ config X86_POPAD_OK
7128
7129 config X86_ALIGNMENT_16
7130 def_bool y
7131- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7132+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7133
7134 config X86_INTEL_USERCOPY
7135 def_bool y
7136@@ -405,7 +405,7 @@ config X86_CMPXCHG64
7137 # generates cmov.
7138 config X86_CMOV
7139 def_bool y
7140- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7141+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7142
7143 config X86_MINIMUM_CPU_FAMILY
7144 int
7145diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7146index e46c214..7c72b55 100644
7147--- a/arch/x86/Kconfig.debug
7148+++ b/arch/x86/Kconfig.debug
7149@@ -84,7 +84,7 @@ config X86_PTDUMP
7150 config DEBUG_RODATA
7151 bool "Write protect kernel read-only data structures"
7152 default y
7153- depends on DEBUG_KERNEL
7154+ depends on DEBUG_KERNEL && BROKEN
7155 ---help---
7156 Mark the kernel read-only data as write-protected in the pagetables,
7157 in order to catch accidental (and incorrect) writes to such const
7158@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7159
7160 config DEBUG_SET_MODULE_RONX
7161 bool "Set loadable kernel module data as NX and text as RO"
7162- depends on MODULES
7163+ depends on MODULES && BROKEN
7164 ---help---
7165 This option helps catch unintended modifications to loadable
7166 kernel module's text and read-only data. It also prevents execution
7167diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7168index 209ba12..15140db 100644
7169--- a/arch/x86/Makefile
7170+++ b/arch/x86/Makefile
7171@@ -46,6 +46,7 @@ else
7172 UTS_MACHINE := x86_64
7173 CHECKFLAGS += -D__x86_64__ -m64
7174
7175+ biarch := $(call cc-option,-m64)
7176 KBUILD_AFLAGS += -m64
7177 KBUILD_CFLAGS += -m64
7178
7179@@ -201,3 +202,12 @@ define archhelp
7180 echo ' FDARGS="..." arguments for the booted kernel'
7181 echo ' FDINITRD=file initrd for the booted kernel'
7182 endef
7183+
7184+define OLD_LD
7185+
7186+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7187+*** Please upgrade your binutils to 2.18 or newer
7188+endef
7189+
7190+archprepare:
7191+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7192diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7193index 95365a8..52f857b 100644
7194--- a/arch/x86/boot/Makefile
7195+++ b/arch/x86/boot/Makefile
7196@@ -63,6 +63,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7197 $(call cc-option, -fno-stack-protector) \
7198 $(call cc-option, -mpreferred-stack-boundary=2)
7199 KBUILD_CFLAGS += $(call cc-option, -m32)
7200+ifdef CONSTIFY_PLUGIN
7201+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7202+endif
7203 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7204 GCOV_PROFILE := n
7205
7206diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7207index 878e4b9..20537ab 100644
7208--- a/arch/x86/boot/bitops.h
7209+++ b/arch/x86/boot/bitops.h
7210@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7211 u8 v;
7212 const u32 *p = (const u32 *)addr;
7213
7214- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7215+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7216 return v;
7217 }
7218
7219@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7220
7221 static inline void set_bit(int nr, void *addr)
7222 {
7223- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7224+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7225 }
7226
7227 #endif /* BOOT_BITOPS_H */
7228diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7229index c7093bd..d4247ffe0 100644
7230--- a/arch/x86/boot/boot.h
7231+++ b/arch/x86/boot/boot.h
7232@@ -85,7 +85,7 @@ static inline void io_delay(void)
7233 static inline u16 ds(void)
7234 {
7235 u16 seg;
7236- asm("movw %%ds,%0" : "=rm" (seg));
7237+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7238 return seg;
7239 }
7240
7241@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7242 static inline int memcmp(const void *s1, const void *s2, size_t len)
7243 {
7244 u8 diff;
7245- asm("repe; cmpsb; setnz %0"
7246+ asm volatile("repe; cmpsb; setnz %0"
7247 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7248 return diff;
7249 }
7250diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7251index b123b9a..2cf2f23 100644
7252--- a/arch/x86/boot/compressed/Makefile
7253+++ b/arch/x86/boot/compressed/Makefile
7254@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7255 KBUILD_CFLAGS += $(cflags-y)
7256 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7257 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7258+ifdef CONSTIFY_PLUGIN
7259+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7260+endif
7261
7262 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7263 GCOV_PROFILE := n
7264diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7265index a055993..47e126c 100644
7266--- a/arch/x86/boot/compressed/head_32.S
7267+++ b/arch/x86/boot/compressed/head_32.S
7268@@ -98,7 +98,7 @@ preferred_addr:
7269 notl %eax
7270 andl %eax, %ebx
7271 #else
7272- movl $LOAD_PHYSICAL_ADDR, %ebx
7273+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7274 #endif
7275
7276 /* Target address to relocate to for decompression */
7277@@ -184,7 +184,7 @@ relocated:
7278 * and where it was actually loaded.
7279 */
7280 movl %ebp, %ebx
7281- subl $LOAD_PHYSICAL_ADDR, %ebx
7282+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7283 jz 2f /* Nothing to be done if loaded at compiled addr. */
7284 /*
7285 * Process relocations.
7286@@ -192,8 +192,7 @@ relocated:
7287
7288 1: subl $4, %edi
7289 movl (%edi), %ecx
7290- testl %ecx, %ecx
7291- jz 2f
7292+ jecxz 2f
7293 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7294 jmp 1b
7295 2:
7296diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7297index 558d76c..606aa24 100644
7298--- a/arch/x86/boot/compressed/head_64.S
7299+++ b/arch/x86/boot/compressed/head_64.S
7300@@ -91,7 +91,7 @@ ENTRY(startup_32)
7301 notl %eax
7302 andl %eax, %ebx
7303 #else
7304- movl $LOAD_PHYSICAL_ADDR, %ebx
7305+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7306 #endif
7307
7308 /* Target address to relocate to for decompression */
7309@@ -253,7 +253,7 @@ preferred_addr:
7310 notq %rax
7311 andq %rax, %rbp
7312 #else
7313- movq $LOAD_PHYSICAL_ADDR, %rbp
7314+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7315 #endif
7316
7317 /* Target address to relocate to for decompression */
7318diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7319index 7116dcb..d9ae1d7 100644
7320--- a/arch/x86/boot/compressed/misc.c
7321+++ b/arch/x86/boot/compressed/misc.c
7322@@ -310,7 +310,7 @@ static void parse_elf(void *output)
7323 case PT_LOAD:
7324 #ifdef CONFIG_RELOCATABLE
7325 dest = output;
7326- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7327+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7328 #else
7329 dest = (void *)(phdr->p_paddr);
7330 #endif
7331@@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7332 error("Destination address too large");
7333 #endif
7334 #ifndef CONFIG_RELOCATABLE
7335- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7336+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7337 error("Wrong destination address");
7338 #endif
7339
7340diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7341index 89bbf4e..869908e 100644
7342--- a/arch/x86/boot/compressed/relocs.c
7343+++ b/arch/x86/boot/compressed/relocs.c
7344@@ -13,8 +13,11 @@
7345
7346 static void die(char *fmt, ...);
7347
7348+#include "../../../../include/generated/autoconf.h"
7349+
7350 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7351 static Elf32_Ehdr ehdr;
7352+static Elf32_Phdr *phdr;
7353 static unsigned long reloc_count, reloc_idx;
7354 static unsigned long *relocs;
7355
7356@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
7357 }
7358 }
7359
7360+static void read_phdrs(FILE *fp)
7361+{
7362+ unsigned int i;
7363+
7364+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7365+ if (!phdr) {
7366+ die("Unable to allocate %d program headers\n",
7367+ ehdr.e_phnum);
7368+ }
7369+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7370+ die("Seek to %d failed: %s\n",
7371+ ehdr.e_phoff, strerror(errno));
7372+ }
7373+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7374+ die("Cannot read ELF program headers: %s\n",
7375+ strerror(errno));
7376+ }
7377+ for(i = 0; i < ehdr.e_phnum; i++) {
7378+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7379+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7380+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7381+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7382+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7383+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7384+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7385+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7386+ }
7387+
7388+}
7389+
7390 static void read_shdrs(FILE *fp)
7391 {
7392- int i;
7393+ unsigned int i;
7394 Elf32_Shdr shdr;
7395
7396 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7397@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
7398
7399 static void read_strtabs(FILE *fp)
7400 {
7401- int i;
7402+ unsigned int i;
7403 for (i = 0; i < ehdr.e_shnum; i++) {
7404 struct section *sec = &secs[i];
7405 if (sec->shdr.sh_type != SHT_STRTAB) {
7406@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
7407
7408 static void read_symtabs(FILE *fp)
7409 {
7410- int i,j;
7411+ unsigned int i,j;
7412 for (i = 0; i < ehdr.e_shnum; i++) {
7413 struct section *sec = &secs[i];
7414 if (sec->shdr.sh_type != SHT_SYMTAB) {
7415@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
7416
7417 static void read_relocs(FILE *fp)
7418 {
7419- int i,j;
7420+ unsigned int i,j;
7421+ uint32_t base;
7422+
7423 for (i = 0; i < ehdr.e_shnum; i++) {
7424 struct section *sec = &secs[i];
7425 if (sec->shdr.sh_type != SHT_REL) {
7426@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
7427 die("Cannot read symbol table: %s\n",
7428 strerror(errno));
7429 }
7430+ base = 0;
7431+ for (j = 0; j < ehdr.e_phnum; j++) {
7432+ if (phdr[j].p_type != PT_LOAD )
7433+ continue;
7434+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7435+ continue;
7436+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7437+ break;
7438+ }
7439 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7440 Elf32_Rel *rel = &sec->reltab[j];
7441- rel->r_offset = elf32_to_cpu(rel->r_offset);
7442+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7443 rel->r_info = elf32_to_cpu(rel->r_info);
7444 }
7445 }
7446@@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
7447
7448 static void print_absolute_symbols(void)
7449 {
7450- int i;
7451+ unsigned int i;
7452 printf("Absolute symbols\n");
7453 printf(" Num: Value Size Type Bind Visibility Name\n");
7454 for (i = 0; i < ehdr.e_shnum; i++) {
7455 struct section *sec = &secs[i];
7456 char *sym_strtab;
7457 Elf32_Sym *sh_symtab;
7458- int j;
7459+ unsigned int j;
7460
7461 if (sec->shdr.sh_type != SHT_SYMTAB) {
7462 continue;
7463@@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
7464
7465 static void print_absolute_relocs(void)
7466 {
7467- int i, printed = 0;
7468+ unsigned int i, printed = 0;
7469
7470 for (i = 0; i < ehdr.e_shnum; i++) {
7471 struct section *sec = &secs[i];
7472 struct section *sec_applies, *sec_symtab;
7473 char *sym_strtab;
7474 Elf32_Sym *sh_symtab;
7475- int j;
7476+ unsigned int j;
7477 if (sec->shdr.sh_type != SHT_REL) {
7478 continue;
7479 }
7480@@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
7481
7482 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7483 {
7484- int i;
7485+ unsigned int i;
7486 /* Walk through the relocations */
7487 for (i = 0; i < ehdr.e_shnum; i++) {
7488 char *sym_strtab;
7489 Elf32_Sym *sh_symtab;
7490 struct section *sec_applies, *sec_symtab;
7491- int j;
7492+ unsigned int j;
7493 struct section *sec = &secs[i];
7494
7495 if (sec->shdr.sh_type != SHT_REL) {
7496@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7497 !is_rel_reloc(sym_name(sym_strtab, sym))) {
7498 continue;
7499 }
7500+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7501+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7502+ continue;
7503+
7504+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7505+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7506+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7507+ continue;
7508+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7509+ continue;
7510+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7511+ continue;
7512+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7513+ continue;
7514+#endif
7515+
7516 switch (r_type) {
7517 case R_386_NONE:
7518 case R_386_PC32:
7519@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, const void *vb)
7520
7521 static void emit_relocs(int as_text)
7522 {
7523- int i;
7524+ unsigned int i;
7525 /* Count how many relocations I have and allocate space for them. */
7526 reloc_count = 0;
7527 walk_relocs(count_reloc);
7528@@ -665,6 +725,7 @@ int main(int argc, char **argv)
7529 fname, strerror(errno));
7530 }
7531 read_ehdr(fp);
7532+ read_phdrs(fp);
7533 read_shdrs(fp);
7534 read_strtabs(fp);
7535 read_symtabs(fp);
7536diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7537index 4d3ff03..e4972ff 100644
7538--- a/arch/x86/boot/cpucheck.c
7539+++ b/arch/x86/boot/cpucheck.c
7540@@ -74,7 +74,7 @@ static int has_fpu(void)
7541 u16 fcw = -1, fsw = -1;
7542 u32 cr0;
7543
7544- asm("movl %%cr0,%0" : "=r" (cr0));
7545+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7546 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7547 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7548 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7549@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7550 {
7551 u32 f0, f1;
7552
7553- asm("pushfl ; "
7554+ asm volatile("pushfl ; "
7555 "pushfl ; "
7556 "popl %0 ; "
7557 "movl %0,%1 ; "
7558@@ -115,7 +115,7 @@ static void get_flags(void)
7559 set_bit(X86_FEATURE_FPU, cpu.flags);
7560
7561 if (has_eflag(X86_EFLAGS_ID)) {
7562- asm("cpuid"
7563+ asm volatile("cpuid"
7564 : "=a" (max_intel_level),
7565 "=b" (cpu_vendor[0]),
7566 "=d" (cpu_vendor[1]),
7567@@ -124,7 +124,7 @@ static void get_flags(void)
7568
7569 if (max_intel_level >= 0x00000001 &&
7570 max_intel_level <= 0x0000ffff) {
7571- asm("cpuid"
7572+ asm volatile("cpuid"
7573 : "=a" (tfms),
7574 "=c" (cpu.flags[4]),
7575 "=d" (cpu.flags[0])
7576@@ -136,7 +136,7 @@ static void get_flags(void)
7577 cpu.model += ((tfms >> 16) & 0xf) << 4;
7578 }
7579
7580- asm("cpuid"
7581+ asm volatile("cpuid"
7582 : "=a" (max_amd_level)
7583 : "a" (0x80000000)
7584 : "ebx", "ecx", "edx");
7585@@ -144,7 +144,7 @@ static void get_flags(void)
7586 if (max_amd_level >= 0x80000001 &&
7587 max_amd_level <= 0x8000ffff) {
7588 u32 eax = 0x80000001;
7589- asm("cpuid"
7590+ asm volatile("cpuid"
7591 : "+a" (eax),
7592 "=c" (cpu.flags[6]),
7593 "=d" (cpu.flags[1])
7594@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7595 u32 ecx = MSR_K7_HWCR;
7596 u32 eax, edx;
7597
7598- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7599+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7600 eax &= ~(1 << 15);
7601- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7602+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7603
7604 get_flags(); /* Make sure it really did something */
7605 err = check_flags();
7606@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7607 u32 ecx = MSR_VIA_FCR;
7608 u32 eax, edx;
7609
7610- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7611+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7612 eax |= (1<<1)|(1<<7);
7613- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7614+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7615
7616 set_bit(X86_FEATURE_CX8, cpu.flags);
7617 err = check_flags();
7618@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7619 u32 eax, edx;
7620 u32 level = 1;
7621
7622- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7623- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7624- asm("cpuid"
7625+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7626+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7627+ asm volatile("cpuid"
7628 : "+a" (level), "=d" (cpu.flags[0])
7629 : : "ecx", "ebx");
7630- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7631+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7632
7633 err = check_flags();
7634 }
7635diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7636index f1bbeeb..aff09cb 100644
7637--- a/arch/x86/boot/header.S
7638+++ b/arch/x86/boot/header.S
7639@@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7640 # single linked list of
7641 # struct setup_data
7642
7643-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7644+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7645
7646 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7647 #define VO_INIT_SIZE (VO__end - VO__text)
7648diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7649index db75d07..8e6d0af 100644
7650--- a/arch/x86/boot/memory.c
7651+++ b/arch/x86/boot/memory.c
7652@@ -19,7 +19,7 @@
7653
7654 static int detect_memory_e820(void)
7655 {
7656- int count = 0;
7657+ unsigned int count = 0;
7658 struct biosregs ireg, oreg;
7659 struct e820entry *desc = boot_params.e820_map;
7660 static struct e820entry buf; /* static so it is zeroed */
7661diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7662index 11e8c6e..fdbb1ed 100644
7663--- a/arch/x86/boot/video-vesa.c
7664+++ b/arch/x86/boot/video-vesa.c
7665@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7666
7667 boot_params.screen_info.vesapm_seg = oreg.es;
7668 boot_params.screen_info.vesapm_off = oreg.di;
7669+ boot_params.screen_info.vesapm_size = oreg.cx;
7670 }
7671
7672 /*
7673diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7674index 43eda28..5ab5fdb 100644
7675--- a/arch/x86/boot/video.c
7676+++ b/arch/x86/boot/video.c
7677@@ -96,7 +96,7 @@ static void store_mode_params(void)
7678 static unsigned int get_entry(void)
7679 {
7680 char entry_buf[4];
7681- int i, len = 0;
7682+ unsigned int i, len = 0;
7683 int key;
7684 unsigned int v;
7685
7686diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7687index 5b577d5..3c1fed4 100644
7688--- a/arch/x86/crypto/aes-x86_64-asm_64.S
7689+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7690@@ -8,6 +8,8 @@
7691 * including this sentence is retained in full.
7692 */
7693
7694+#include <asm/alternative-asm.h>
7695+
7696 .extern crypto_ft_tab
7697 .extern crypto_it_tab
7698 .extern crypto_fl_tab
7699@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7700 je B192; \
7701 leaq 32(r9),r9;
7702
7703+#define ret pax_force_retaddr 0, 1; ret
7704+
7705 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7706 movq r1,r2; \
7707 movq r3,r4; \
7708diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7709index be6d9e3..21fbbca 100644
7710--- a/arch/x86/crypto/aesni-intel_asm.S
7711+++ b/arch/x86/crypto/aesni-intel_asm.S
7712@@ -31,6 +31,7 @@
7713
7714 #include <linux/linkage.h>
7715 #include <asm/inst.h>
7716+#include <asm/alternative-asm.h>
7717
7718 #ifdef __x86_64__
7719 .data
7720@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
7721 pop %r14
7722 pop %r13
7723 pop %r12
7724+ pax_force_retaddr 0, 1
7725 ret
7726+ENDPROC(aesni_gcm_dec)
7727
7728
7729 /*****************************************************************************
7730@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
7731 pop %r14
7732 pop %r13
7733 pop %r12
7734+ pax_force_retaddr 0, 1
7735 ret
7736+ENDPROC(aesni_gcm_enc)
7737
7738 #endif
7739
7740@@ -1714,6 +1719,7 @@ _key_expansion_256a:
7741 pxor %xmm1, %xmm0
7742 movaps %xmm0, (TKEYP)
7743 add $0x10, TKEYP
7744+ pax_force_retaddr_bts
7745 ret
7746
7747 .align 4
7748@@ -1738,6 +1744,7 @@ _key_expansion_192a:
7749 shufps $0b01001110, %xmm2, %xmm1
7750 movaps %xmm1, 0x10(TKEYP)
7751 add $0x20, TKEYP
7752+ pax_force_retaddr_bts
7753 ret
7754
7755 .align 4
7756@@ -1757,6 +1764,7 @@ _key_expansion_192b:
7757
7758 movaps %xmm0, (TKEYP)
7759 add $0x10, TKEYP
7760+ pax_force_retaddr_bts
7761 ret
7762
7763 .align 4
7764@@ -1769,6 +1777,7 @@ _key_expansion_256b:
7765 pxor %xmm1, %xmm2
7766 movaps %xmm2, (TKEYP)
7767 add $0x10, TKEYP
7768+ pax_force_retaddr_bts
7769 ret
7770
7771 /*
7772@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
7773 #ifndef __x86_64__
7774 popl KEYP
7775 #endif
7776+ pax_force_retaddr 0, 1
7777 ret
7778+ENDPROC(aesni_set_key)
7779
7780 /*
7781 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7782@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
7783 popl KLEN
7784 popl KEYP
7785 #endif
7786+ pax_force_retaddr 0, 1
7787 ret
7788+ENDPROC(aesni_enc)
7789
7790 /*
7791 * _aesni_enc1: internal ABI
7792@@ -1959,6 +1972,7 @@ _aesni_enc1:
7793 AESENC KEY STATE
7794 movaps 0x70(TKEYP), KEY
7795 AESENCLAST KEY STATE
7796+ pax_force_retaddr_bts
7797 ret
7798
7799 /*
7800@@ -2067,6 +2081,7 @@ _aesni_enc4:
7801 AESENCLAST KEY STATE2
7802 AESENCLAST KEY STATE3
7803 AESENCLAST KEY STATE4
7804+ pax_force_retaddr_bts
7805 ret
7806
7807 /*
7808@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
7809 popl KLEN
7810 popl KEYP
7811 #endif
7812+ pax_force_retaddr 0, 1
7813 ret
7814+ENDPROC(aesni_dec)
7815
7816 /*
7817 * _aesni_dec1: internal ABI
7818@@ -2146,6 +2163,7 @@ _aesni_dec1:
7819 AESDEC KEY STATE
7820 movaps 0x70(TKEYP), KEY
7821 AESDECLAST KEY STATE
7822+ pax_force_retaddr_bts
7823 ret
7824
7825 /*
7826@@ -2254,6 +2272,7 @@ _aesni_dec4:
7827 AESDECLAST KEY STATE2
7828 AESDECLAST KEY STATE3
7829 AESDECLAST KEY STATE4
7830+ pax_force_retaddr_bts
7831 ret
7832
7833 /*
7834@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
7835 popl KEYP
7836 popl LEN
7837 #endif
7838+ pax_force_retaddr 0, 1
7839 ret
7840+ENDPROC(aesni_ecb_enc)
7841
7842 /*
7843 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7844@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
7845 popl KEYP
7846 popl LEN
7847 #endif
7848+ pax_force_retaddr 0, 1
7849 ret
7850+ENDPROC(aesni_ecb_dec)
7851
7852 /*
7853 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7854@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
7855 popl LEN
7856 popl IVP
7857 #endif
7858+ pax_force_retaddr 0, 1
7859 ret
7860+ENDPROC(aesni_cbc_enc)
7861
7862 /*
7863 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7864@@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
7865 popl LEN
7866 popl IVP
7867 #endif
7868+ pax_force_retaddr 0, 1
7869 ret
7870+ENDPROC(aesni_cbc_dec)
7871
7872 #ifdef __x86_64__
7873 .align 16
7874@@ -2524,6 +2551,7 @@ _aesni_inc_init:
7875 mov $1, TCTR_LOW
7876 MOVQ_R64_XMM TCTR_LOW INC
7877 MOVQ_R64_XMM CTR TCTR_LOW
7878+ pax_force_retaddr_bts
7879 ret
7880
7881 /*
7882@@ -2552,6 +2580,7 @@ _aesni_inc:
7883 .Linc_low:
7884 movaps CTR, IV
7885 PSHUFB_XMM BSWAP_MASK IV
7886+ pax_force_retaddr_bts
7887 ret
7888
7889 /*
7890@@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
7891 .Lctr_enc_ret:
7892 movups IV, (IVP)
7893 .Lctr_enc_just_ret:
7894+ pax_force_retaddr 0, 1
7895 ret
7896+ENDPROC(aesni_ctr_enc)
7897 #endif
7898diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
7899index 545d0ce..14841a6 100644
7900--- a/arch/x86/crypto/aesni-intel_glue.c
7901+++ b/arch/x86/crypto/aesni-intel_glue.c
7902@@ -929,6 +929,8 @@ out_free_ablkcipher:
7903 }
7904
7905 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
7906+ unsigned int key_len) __size_overflow(3);
7907+static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
7908 unsigned int key_len)
7909 {
7910 int ret = 0;
7911diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
7912index 391d245..67f35c2 100644
7913--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
7914+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
7915@@ -20,6 +20,8 @@
7916 *
7917 */
7918
7919+#include <asm/alternative-asm.h>
7920+
7921 .file "blowfish-x86_64-asm.S"
7922 .text
7923
7924@@ -151,9 +153,11 @@ __blowfish_enc_blk:
7925 jnz __enc_xor;
7926
7927 write_block();
7928+ pax_force_retaddr 0, 1
7929 ret;
7930 __enc_xor:
7931 xor_block();
7932+ pax_force_retaddr 0, 1
7933 ret;
7934
7935 .align 8
7936@@ -188,6 +192,7 @@ blowfish_dec_blk:
7937
7938 movq %r11, %rbp;
7939
7940+ pax_force_retaddr 0, 1
7941 ret;
7942
7943 /**********************************************************************
7944@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
7945
7946 popq %rbx;
7947 popq %rbp;
7948+ pax_force_retaddr 0, 1
7949 ret;
7950
7951 __enc_xor4:
7952@@ -349,6 +355,7 @@ __enc_xor4:
7953
7954 popq %rbx;
7955 popq %rbp;
7956+ pax_force_retaddr 0, 1
7957 ret;
7958
7959 .align 8
7960@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
7961 popq %rbx;
7962 popq %rbp;
7963
7964+ pax_force_retaddr 0, 1
7965 ret;
7966
7967diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7968index 6214a9b..1f4fc9a 100644
7969--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7970+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7971@@ -1,3 +1,5 @@
7972+#include <asm/alternative-asm.h>
7973+
7974 # enter ECRYPT_encrypt_bytes
7975 .text
7976 .p2align 5
7977@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7978 add %r11,%rsp
7979 mov %rdi,%rax
7980 mov %rsi,%rdx
7981+ pax_force_retaddr 0, 1
7982 ret
7983 # bytesatleast65:
7984 ._bytesatleast65:
7985@@ -891,6 +894,7 @@ ECRYPT_keysetup:
7986 add %r11,%rsp
7987 mov %rdi,%rax
7988 mov %rsi,%rdx
7989+ pax_force_retaddr
7990 ret
7991 # enter ECRYPT_ivsetup
7992 .text
7993@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7994 add %r11,%rsp
7995 mov %rdi,%rax
7996 mov %rsi,%rdx
7997+ pax_force_retaddr
7998 ret
7999diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8000index 7f24a15..9cd3ffe 100644
8001--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8002+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8003@@ -24,6 +24,8 @@
8004 *
8005 */
8006
8007+#include <asm/alternative-asm.h>
8008+
8009 .file "serpent-sse2-x86_64-asm_64.S"
8010 .text
8011
8012@@ -695,12 +697,14 @@ __serpent_enc_blk_8way:
8013 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8014 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8015
8016+ pax_force_retaddr
8017 ret;
8018
8019 __enc_xor8:
8020 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8021 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8022
8023+ pax_force_retaddr
8024 ret;
8025
8026 .align 8
8027@@ -758,4 +762,5 @@ serpent_dec_blk_8way:
8028 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8029 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8030
8031+ pax_force_retaddr
8032 ret;
8033diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8034index b2c2f57..8470cab 100644
8035--- a/arch/x86/crypto/sha1_ssse3_asm.S
8036+++ b/arch/x86/crypto/sha1_ssse3_asm.S
8037@@ -28,6 +28,8 @@
8038 * (at your option) any later version.
8039 */
8040
8041+#include <asm/alternative-asm.h>
8042+
8043 #define CTX %rdi // arg1
8044 #define BUF %rsi // arg2
8045 #define CNT %rdx // arg3
8046@@ -104,6 +106,7 @@
8047 pop %r12
8048 pop %rbp
8049 pop %rbx
8050+ pax_force_retaddr 0, 1
8051 ret
8052
8053 .size \name, .-\name
8054diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8055index 5b012a2..36d5364 100644
8056--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8057+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8058@@ -20,6 +20,8 @@
8059 *
8060 */
8061
8062+#include <asm/alternative-asm.h>
8063+
8064 .file "twofish-x86_64-asm-3way.S"
8065 .text
8066
8067@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8068 popq %r13;
8069 popq %r14;
8070 popq %r15;
8071+ pax_force_retaddr 0, 1
8072 ret;
8073
8074 __enc_xor3:
8075@@ -271,6 +274,7 @@ __enc_xor3:
8076 popq %r13;
8077 popq %r14;
8078 popq %r15;
8079+ pax_force_retaddr 0, 1
8080 ret;
8081
8082 .global twofish_dec_blk_3way
8083@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8084 popq %r13;
8085 popq %r14;
8086 popq %r15;
8087+ pax_force_retaddr 0, 1
8088 ret;
8089
8090diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8091index 7bcf3fc..f53832f 100644
8092--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8093+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8094@@ -21,6 +21,7 @@
8095 .text
8096
8097 #include <asm/asm-offsets.h>
8098+#include <asm/alternative-asm.h>
8099
8100 #define a_offset 0
8101 #define b_offset 4
8102@@ -268,6 +269,7 @@ twofish_enc_blk:
8103
8104 popq R1
8105 movq $1,%rax
8106+ pax_force_retaddr 0, 1
8107 ret
8108
8109 twofish_dec_blk:
8110@@ -319,4 +321,5 @@ twofish_dec_blk:
8111
8112 popq R1
8113 movq $1,%rax
8114+ pax_force_retaddr 0, 1
8115 ret
8116diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8117index 39e4909..887aa7e 100644
8118--- a/arch/x86/ia32/ia32_aout.c
8119+++ b/arch/x86/ia32/ia32_aout.c
8120@@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8121 unsigned long dump_start, dump_size;
8122 struct user32 dump;
8123
8124+ memset(&dump, 0, sizeof(dump));
8125+
8126 fs = get_fs();
8127 set_fs(KERNEL_DS);
8128 has_dumped = 1;
8129diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8130index 6557769..ef6ae89 100644
8131--- a/arch/x86/ia32/ia32_signal.c
8132+++ b/arch/x86/ia32/ia32_signal.c
8133@@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8134 }
8135 seg = get_fs();
8136 set_fs(KERNEL_DS);
8137- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8138+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8139 set_fs(seg);
8140 if (ret >= 0 && uoss_ptr) {
8141 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8142@@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8143 */
8144 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8145 size_t frame_size,
8146- void **fpstate)
8147+ void __user **fpstate)
8148 {
8149 unsigned long sp;
8150
8151@@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8152
8153 if (used_math()) {
8154 sp = sp - sig_xstate_ia32_size;
8155- *fpstate = (struct _fpstate_ia32 *) sp;
8156+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8157 if (save_i387_xstate_ia32(*fpstate) < 0)
8158 return (void __user *) -1L;
8159 }
8160@@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8161 sp -= frame_size;
8162 /* Align the stack pointer according to the i386 ABI,
8163 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8164- sp = ((sp + 4) & -16ul) - 4;
8165+ sp = ((sp - 12) & -16ul) - 4;
8166 return (void __user *) sp;
8167 }
8168
8169@@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8170 * These are actually not used anymore, but left because some
8171 * gdb versions depend on them as a marker.
8172 */
8173- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8174+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8175 } put_user_catch(err);
8176
8177 if (err)
8178@@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8179 0xb8,
8180 __NR_ia32_rt_sigreturn,
8181 0x80cd,
8182- 0,
8183+ 0
8184 };
8185
8186 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8187@@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8188
8189 if (ka->sa.sa_flags & SA_RESTORER)
8190 restorer = ka->sa.sa_restorer;
8191+ else if (current->mm->context.vdso)
8192+ /* Return stub is in 32bit vsyscall page */
8193+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8194 else
8195- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8196- rt_sigreturn);
8197+ restorer = &frame->retcode;
8198 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8199
8200 /*
8201 * Not actually used anymore, but left because some gdb
8202 * versions need it.
8203 */
8204- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8205+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8206 } put_user_catch(err);
8207
8208 if (err)
8209diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8210index e3e7340..05ed805 100644
8211--- a/arch/x86/ia32/ia32entry.S
8212+++ b/arch/x86/ia32/ia32entry.S
8213@@ -13,8 +13,10 @@
8214 #include <asm/thread_info.h>
8215 #include <asm/segment.h>
8216 #include <asm/irqflags.h>
8217+#include <asm/pgtable.h>
8218 #include <linux/linkage.h>
8219 #include <linux/err.h>
8220+#include <asm/alternative-asm.h>
8221
8222 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8223 #include <linux/elf-em.h>
8224@@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8225 ENDPROC(native_irq_enable_sysexit)
8226 #endif
8227
8228+ .macro pax_enter_kernel_user
8229+ pax_set_fptr_mask
8230+#ifdef CONFIG_PAX_MEMORY_UDEREF
8231+ call pax_enter_kernel_user
8232+#endif
8233+ .endm
8234+
8235+ .macro pax_exit_kernel_user
8236+#ifdef CONFIG_PAX_MEMORY_UDEREF
8237+ call pax_exit_kernel_user
8238+#endif
8239+#ifdef CONFIG_PAX_RANDKSTACK
8240+ pushq %rax
8241+ pushq %r11
8242+ call pax_randomize_kstack
8243+ popq %r11
8244+ popq %rax
8245+#endif
8246+ .endm
8247+
8248+.macro pax_erase_kstack
8249+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8250+ call pax_erase_kstack
8251+#endif
8252+.endm
8253+
8254 /*
8255 * 32bit SYSENTER instruction entry.
8256 *
8257@@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8258 CFI_REGISTER rsp,rbp
8259 SWAPGS_UNSAFE_STACK
8260 movq PER_CPU_VAR(kernel_stack), %rsp
8261- addq $(KERNEL_STACK_OFFSET),%rsp
8262- /*
8263- * No need to follow this irqs on/off section: the syscall
8264- * disabled irqs, here we enable it straight after entry:
8265- */
8266- ENABLE_INTERRUPTS(CLBR_NONE)
8267 movl %ebp,%ebp /* zero extension */
8268 pushq_cfi $__USER32_DS
8269 /*CFI_REL_OFFSET ss,0*/
8270@@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8271 CFI_REL_OFFSET rsp,0
8272 pushfq_cfi
8273 /*CFI_REL_OFFSET rflags,0*/
8274- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8275- CFI_REGISTER rip,r10
8276+ orl $X86_EFLAGS_IF,(%rsp)
8277+ GET_THREAD_INFO(%r11)
8278+ movl TI_sysenter_return(%r11), %r11d
8279+ CFI_REGISTER rip,r11
8280 pushq_cfi $__USER32_CS
8281 /*CFI_REL_OFFSET cs,0*/
8282 movl %eax, %eax
8283- pushq_cfi %r10
8284+ pushq_cfi %r11
8285 CFI_REL_OFFSET rip,0
8286 pushq_cfi %rax
8287 cld
8288 SAVE_ARGS 0,1,0
8289+ pax_enter_kernel_user
8290+ /*
8291+ * No need to follow this irqs on/off section: the syscall
8292+ * disabled irqs, here we enable it straight after entry:
8293+ */
8294+ ENABLE_INTERRUPTS(CLBR_NONE)
8295 /* no need to do an access_ok check here because rbp has been
8296 32bit zero extended */
8297+
8298+#ifdef CONFIG_PAX_MEMORY_UDEREF
8299+ mov $PAX_USER_SHADOW_BASE,%r11
8300+ add %r11,%rbp
8301+#endif
8302+
8303 1: movl (%rbp),%ebp
8304 .section __ex_table,"a"
8305 .quad 1b,ia32_badarg
8306 .previous
8307- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8308- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8309+ GET_THREAD_INFO(%r11)
8310+ orl $TS_COMPAT,TI_status(%r11)
8311+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8312 CFI_REMEMBER_STATE
8313 jnz sysenter_tracesys
8314 cmpq $(IA32_NR_syscalls-1),%rax
8315@@ -160,12 +197,15 @@ sysenter_do_call:
8316 sysenter_dispatch:
8317 call *ia32_sys_call_table(,%rax,8)
8318 movq %rax,RAX-ARGOFFSET(%rsp)
8319+ GET_THREAD_INFO(%r11)
8320 DISABLE_INTERRUPTS(CLBR_NONE)
8321 TRACE_IRQS_OFF
8322- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8323+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8324 jnz sysexit_audit
8325 sysexit_from_sys_call:
8326- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8327+ pax_exit_kernel_user
8328+ pax_erase_kstack
8329+ andl $~TS_COMPAT,TI_status(%r11)
8330 /* clear IF, that popfq doesn't enable interrupts early */
8331 andl $~0x200,EFLAGS-R11(%rsp)
8332 movl RIP-R11(%rsp),%edx /* User %eip */
8333@@ -191,6 +231,9 @@ sysexit_from_sys_call:
8334 movl %eax,%esi /* 2nd arg: syscall number */
8335 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8336 call __audit_syscall_entry
8337+
8338+ pax_erase_kstack
8339+
8340 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8341 cmpq $(IA32_NR_syscalls-1),%rax
8342 ja ia32_badsys
8343@@ -202,7 +245,7 @@ sysexit_from_sys_call:
8344 .endm
8345
8346 .macro auditsys_exit exit
8347- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8348+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8349 jnz ia32_ret_from_sys_call
8350 TRACE_IRQS_ON
8351 sti
8352@@ -213,11 +256,12 @@ sysexit_from_sys_call:
8353 1: setbe %al /* 1 if error, 0 if not */
8354 movzbl %al,%edi /* zero-extend that into %edi */
8355 call __audit_syscall_exit
8356+ GET_THREAD_INFO(%r11)
8357 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8358 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8359 cli
8360 TRACE_IRQS_OFF
8361- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8362+ testl %edi,TI_flags(%r11)
8363 jz \exit
8364 CLEAR_RREGS -ARGOFFSET
8365 jmp int_with_check
8366@@ -235,7 +279,7 @@ sysexit_audit:
8367
8368 sysenter_tracesys:
8369 #ifdef CONFIG_AUDITSYSCALL
8370- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8371+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8372 jz sysenter_auditsys
8373 #endif
8374 SAVE_REST
8375@@ -243,6 +287,9 @@ sysenter_tracesys:
8376 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8377 movq %rsp,%rdi /* &pt_regs -> arg1 */
8378 call syscall_trace_enter
8379+
8380+ pax_erase_kstack
8381+
8382 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8383 RESTORE_REST
8384 cmpq $(IA32_NR_syscalls-1),%rax
8385@@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8386 ENTRY(ia32_cstar_target)
8387 CFI_STARTPROC32 simple
8388 CFI_SIGNAL_FRAME
8389- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8390+ CFI_DEF_CFA rsp,0
8391 CFI_REGISTER rip,rcx
8392 /*CFI_REGISTER rflags,r11*/
8393 SWAPGS_UNSAFE_STACK
8394 movl %esp,%r8d
8395 CFI_REGISTER rsp,r8
8396 movq PER_CPU_VAR(kernel_stack),%rsp
8397+ SAVE_ARGS 8*6,0,0
8398+ pax_enter_kernel_user
8399 /*
8400 * No need to follow this irqs on/off section: the syscall
8401 * disabled irqs and here we enable it straight after entry:
8402 */
8403 ENABLE_INTERRUPTS(CLBR_NONE)
8404- SAVE_ARGS 8,0,0
8405 movl %eax,%eax /* zero extension */
8406 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8407 movq %rcx,RIP-ARGOFFSET(%rsp)
8408@@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8409 /* no need to do an access_ok check here because r8 has been
8410 32bit zero extended */
8411 /* hardware stack frame is complete now */
8412+
8413+#ifdef CONFIG_PAX_MEMORY_UDEREF
8414+ mov $PAX_USER_SHADOW_BASE,%r11
8415+ add %r11,%r8
8416+#endif
8417+
8418 1: movl (%r8),%r9d
8419 .section __ex_table,"a"
8420 .quad 1b,ia32_badarg
8421 .previous
8422- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8423- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8424+ GET_THREAD_INFO(%r11)
8425+ orl $TS_COMPAT,TI_status(%r11)
8426+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8427 CFI_REMEMBER_STATE
8428 jnz cstar_tracesys
8429 cmpq $IA32_NR_syscalls-1,%rax
8430@@ -317,12 +372,15 @@ cstar_do_call:
8431 cstar_dispatch:
8432 call *ia32_sys_call_table(,%rax,8)
8433 movq %rax,RAX-ARGOFFSET(%rsp)
8434+ GET_THREAD_INFO(%r11)
8435 DISABLE_INTERRUPTS(CLBR_NONE)
8436 TRACE_IRQS_OFF
8437- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8438+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8439 jnz sysretl_audit
8440 sysretl_from_sys_call:
8441- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8442+ pax_exit_kernel_user
8443+ pax_erase_kstack
8444+ andl $~TS_COMPAT,TI_status(%r11)
8445 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8446 movl RIP-ARGOFFSET(%rsp),%ecx
8447 CFI_REGISTER rip,rcx
8448@@ -350,7 +408,7 @@ sysretl_audit:
8449
8450 cstar_tracesys:
8451 #ifdef CONFIG_AUDITSYSCALL
8452- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8453+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8454 jz cstar_auditsys
8455 #endif
8456 xchgl %r9d,%ebp
8457@@ -359,6 +417,9 @@ cstar_tracesys:
8458 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8459 movq %rsp,%rdi /* &pt_regs -> arg1 */
8460 call syscall_trace_enter
8461+
8462+ pax_erase_kstack
8463+
8464 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8465 RESTORE_REST
8466 xchgl %ebp,%r9d
8467@@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8468 CFI_REL_OFFSET rip,RIP-RIP
8469 PARAVIRT_ADJUST_EXCEPTION_FRAME
8470 SWAPGS
8471- /*
8472- * No need to follow this irqs on/off section: the syscall
8473- * disabled irqs and here we enable it straight after entry:
8474- */
8475- ENABLE_INTERRUPTS(CLBR_NONE)
8476 movl %eax,%eax
8477 pushq_cfi %rax
8478 cld
8479 /* note the registers are not zero extended to the sf.
8480 this could be a problem. */
8481 SAVE_ARGS 0,1,0
8482- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8483- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8484+ pax_enter_kernel_user
8485+ /*
8486+ * No need to follow this irqs on/off section: the syscall
8487+ * disabled irqs and here we enable it straight after entry:
8488+ */
8489+ ENABLE_INTERRUPTS(CLBR_NONE)
8490+ GET_THREAD_INFO(%r11)
8491+ orl $TS_COMPAT,TI_status(%r11)
8492+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8493 jnz ia32_tracesys
8494 cmpq $(IA32_NR_syscalls-1),%rax
8495 ja ia32_badsys
8496@@ -435,6 +498,9 @@ ia32_tracesys:
8497 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8498 movq %rsp,%rdi /* &pt_regs -> arg1 */
8499 call syscall_trace_enter
8500+
8501+ pax_erase_kstack
8502+
8503 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8504 RESTORE_REST
8505 cmpq $(IA32_NR_syscalls-1),%rax
8506diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8507index f6f5c53..b358b28 100644
8508--- a/arch/x86/ia32/sys_ia32.c
8509+++ b/arch/x86/ia32/sys_ia32.c
8510@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8511 */
8512 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8513 {
8514- typeof(ubuf->st_uid) uid = 0;
8515- typeof(ubuf->st_gid) gid = 0;
8516+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
8517+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
8518 SET_UID(uid, stat->uid);
8519 SET_GID(gid, stat->gid);
8520 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8521@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8522 }
8523 set_fs(KERNEL_DS);
8524 ret = sys_rt_sigprocmask(how,
8525- set ? (sigset_t __user *)&s : NULL,
8526- oset ? (sigset_t __user *)&s : NULL,
8527+ set ? (sigset_t __force_user *)&s : NULL,
8528+ oset ? (sigset_t __force_user *)&s : NULL,
8529 sigsetsize);
8530 set_fs(old_fs);
8531 if (ret)
8532@@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
8533 return alarm_setitimer(seconds);
8534 }
8535
8536-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8537+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8538 int options)
8539 {
8540 return compat_sys_wait4(pid, stat_addr, options, NULL);
8541@@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8542 mm_segment_t old_fs = get_fs();
8543
8544 set_fs(KERNEL_DS);
8545- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8546+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8547 set_fs(old_fs);
8548 if (put_compat_timespec(&t, interval))
8549 return -EFAULT;
8550@@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8551 mm_segment_t old_fs = get_fs();
8552
8553 set_fs(KERNEL_DS);
8554- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8555+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8556 set_fs(old_fs);
8557 if (!ret) {
8558 switch (_NSIG_WORDS) {
8559@@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8560 if (copy_siginfo_from_user32(&info, uinfo))
8561 return -EFAULT;
8562 set_fs(KERNEL_DS);
8563- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8564+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8565 set_fs(old_fs);
8566 return ret;
8567 }
8568@@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8569 return -EFAULT;
8570
8571 set_fs(KERNEL_DS);
8572- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8573+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8574 count);
8575 set_fs(old_fs);
8576
8577diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8578index 952bd01..7692c6f 100644
8579--- a/arch/x86/include/asm/alternative-asm.h
8580+++ b/arch/x86/include/asm/alternative-asm.h
8581@@ -15,6 +15,45 @@
8582 .endm
8583 #endif
8584
8585+#ifdef KERNEXEC_PLUGIN
8586+ .macro pax_force_retaddr_bts rip=0
8587+ btsq $63,\rip(%rsp)
8588+ .endm
8589+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8590+ .macro pax_force_retaddr rip=0, reload=0
8591+ btsq $63,\rip(%rsp)
8592+ .endm
8593+ .macro pax_force_fptr ptr
8594+ btsq $63,\ptr
8595+ .endm
8596+ .macro pax_set_fptr_mask
8597+ .endm
8598+#endif
8599+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8600+ .macro pax_force_retaddr rip=0, reload=0
8601+ .if \reload
8602+ pax_set_fptr_mask
8603+ .endif
8604+ orq %r10,\rip(%rsp)
8605+ .endm
8606+ .macro pax_force_fptr ptr
8607+ orq %r10,\ptr
8608+ .endm
8609+ .macro pax_set_fptr_mask
8610+ movabs $0x8000000000000000,%r10
8611+ .endm
8612+#endif
8613+#else
8614+ .macro pax_force_retaddr rip=0, reload=0
8615+ .endm
8616+ .macro pax_force_fptr ptr
8617+ .endm
8618+ .macro pax_force_retaddr_bts rip=0
8619+ .endm
8620+ .macro pax_set_fptr_mask
8621+ .endm
8622+#endif
8623+
8624 .macro altinstruction_entry orig alt feature orig_len alt_len
8625 .long \orig - .
8626 .long \alt - .
8627diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8628index 37ad100..7d47faa 100644
8629--- a/arch/x86/include/asm/alternative.h
8630+++ b/arch/x86/include/asm/alternative.h
8631@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
8632 ".section .discard,\"aw\",@progbits\n" \
8633 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
8634 ".previous\n" \
8635- ".section .altinstr_replacement, \"ax\"\n" \
8636+ ".section .altinstr_replacement, \"a\"\n" \
8637 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8638 ".previous"
8639
8640diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8641index 3ab9bdd..238033e 100644
8642--- a/arch/x86/include/asm/apic.h
8643+++ b/arch/x86/include/asm/apic.h
8644@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
8645
8646 #ifdef CONFIG_X86_LOCAL_APIC
8647
8648-extern unsigned int apic_verbosity;
8649+extern int apic_verbosity;
8650 extern int local_apic_timer_c2_ok;
8651
8652 extern int disable_apic;
8653diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8654index 20370c6..a2eb9b0 100644
8655--- a/arch/x86/include/asm/apm.h
8656+++ b/arch/x86/include/asm/apm.h
8657@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8658 __asm__ __volatile__(APM_DO_ZERO_SEGS
8659 "pushl %%edi\n\t"
8660 "pushl %%ebp\n\t"
8661- "lcall *%%cs:apm_bios_entry\n\t"
8662+ "lcall *%%ss:apm_bios_entry\n\t"
8663 "setc %%al\n\t"
8664 "popl %%ebp\n\t"
8665 "popl %%edi\n\t"
8666@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8667 __asm__ __volatile__(APM_DO_ZERO_SEGS
8668 "pushl %%edi\n\t"
8669 "pushl %%ebp\n\t"
8670- "lcall *%%cs:apm_bios_entry\n\t"
8671+ "lcall *%%ss:apm_bios_entry\n\t"
8672 "setc %%bl\n\t"
8673 "popl %%ebp\n\t"
8674 "popl %%edi\n\t"
8675diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
8676index 58cb6d4..ca9010d 100644
8677--- a/arch/x86/include/asm/atomic.h
8678+++ b/arch/x86/include/asm/atomic.h
8679@@ -22,7 +22,18 @@
8680 */
8681 static inline int atomic_read(const atomic_t *v)
8682 {
8683- return (*(volatile int *)&(v)->counter);
8684+ return (*(volatile const int *)&(v)->counter);
8685+}
8686+
8687+/**
8688+ * atomic_read_unchecked - read atomic variable
8689+ * @v: pointer of type atomic_unchecked_t
8690+ *
8691+ * Atomically reads the value of @v.
8692+ */
8693+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8694+{
8695+ return (*(volatile const int *)&(v)->counter);
8696 }
8697
8698 /**
8699@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
8700 }
8701
8702 /**
8703+ * atomic_set_unchecked - set atomic variable
8704+ * @v: pointer of type atomic_unchecked_t
8705+ * @i: required value
8706+ *
8707+ * Atomically sets the value of @v to @i.
8708+ */
8709+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8710+{
8711+ v->counter = i;
8712+}
8713+
8714+/**
8715 * atomic_add - add integer to atomic variable
8716 * @i: integer value to add
8717 * @v: pointer of type atomic_t
8718@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
8719 */
8720 static inline void atomic_add(int i, atomic_t *v)
8721 {
8722- asm volatile(LOCK_PREFIX "addl %1,%0"
8723+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8724+
8725+#ifdef CONFIG_PAX_REFCOUNT
8726+ "jno 0f\n"
8727+ LOCK_PREFIX "subl %1,%0\n"
8728+ "int $4\n0:\n"
8729+ _ASM_EXTABLE(0b, 0b)
8730+#endif
8731+
8732+ : "+m" (v->counter)
8733+ : "ir" (i));
8734+}
8735+
8736+/**
8737+ * atomic_add_unchecked - add integer to atomic variable
8738+ * @i: integer value to add
8739+ * @v: pointer of type atomic_unchecked_t
8740+ *
8741+ * Atomically adds @i to @v.
8742+ */
8743+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8744+{
8745+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8746 : "+m" (v->counter)
8747 : "ir" (i));
8748 }
8749@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
8750 */
8751 static inline void atomic_sub(int i, atomic_t *v)
8752 {
8753- asm volatile(LOCK_PREFIX "subl %1,%0"
8754+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8755+
8756+#ifdef CONFIG_PAX_REFCOUNT
8757+ "jno 0f\n"
8758+ LOCK_PREFIX "addl %1,%0\n"
8759+ "int $4\n0:\n"
8760+ _ASM_EXTABLE(0b, 0b)
8761+#endif
8762+
8763+ : "+m" (v->counter)
8764+ : "ir" (i));
8765+}
8766+
8767+/**
8768+ * atomic_sub_unchecked - subtract integer from atomic variable
8769+ * @i: integer value to subtract
8770+ * @v: pointer of type atomic_unchecked_t
8771+ *
8772+ * Atomically subtracts @i from @v.
8773+ */
8774+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8775+{
8776+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8777 : "+m" (v->counter)
8778 : "ir" (i));
8779 }
8780@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8781 {
8782 unsigned char c;
8783
8784- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8785+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8786+
8787+#ifdef CONFIG_PAX_REFCOUNT
8788+ "jno 0f\n"
8789+ LOCK_PREFIX "addl %2,%0\n"
8790+ "int $4\n0:\n"
8791+ _ASM_EXTABLE(0b, 0b)
8792+#endif
8793+
8794+ "sete %1\n"
8795 : "+m" (v->counter), "=qm" (c)
8796 : "ir" (i) : "memory");
8797 return c;
8798@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8799 */
8800 static inline void atomic_inc(atomic_t *v)
8801 {
8802- asm volatile(LOCK_PREFIX "incl %0"
8803+ asm volatile(LOCK_PREFIX "incl %0\n"
8804+
8805+#ifdef CONFIG_PAX_REFCOUNT
8806+ "jno 0f\n"
8807+ LOCK_PREFIX "decl %0\n"
8808+ "int $4\n0:\n"
8809+ _ASM_EXTABLE(0b, 0b)
8810+#endif
8811+
8812+ : "+m" (v->counter));
8813+}
8814+
8815+/**
8816+ * atomic_inc_unchecked - increment atomic variable
8817+ * @v: pointer of type atomic_unchecked_t
8818+ *
8819+ * Atomically increments @v by 1.
8820+ */
8821+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8822+{
8823+ asm volatile(LOCK_PREFIX "incl %0\n"
8824 : "+m" (v->counter));
8825 }
8826
8827@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
8828 */
8829 static inline void atomic_dec(atomic_t *v)
8830 {
8831- asm volatile(LOCK_PREFIX "decl %0"
8832+ asm volatile(LOCK_PREFIX "decl %0\n"
8833+
8834+#ifdef CONFIG_PAX_REFCOUNT
8835+ "jno 0f\n"
8836+ LOCK_PREFIX "incl %0\n"
8837+ "int $4\n0:\n"
8838+ _ASM_EXTABLE(0b, 0b)
8839+#endif
8840+
8841+ : "+m" (v->counter));
8842+}
8843+
8844+/**
8845+ * atomic_dec_unchecked - decrement atomic variable
8846+ * @v: pointer of type atomic_unchecked_t
8847+ *
8848+ * Atomically decrements @v by 1.
8849+ */
8850+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8851+{
8852+ asm volatile(LOCK_PREFIX "decl %0\n"
8853 : "+m" (v->counter));
8854 }
8855
8856@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8857 {
8858 unsigned char c;
8859
8860- asm volatile(LOCK_PREFIX "decl %0; sete %1"
8861+ asm volatile(LOCK_PREFIX "decl %0\n"
8862+
8863+#ifdef CONFIG_PAX_REFCOUNT
8864+ "jno 0f\n"
8865+ LOCK_PREFIX "incl %0\n"
8866+ "int $4\n0:\n"
8867+ _ASM_EXTABLE(0b, 0b)
8868+#endif
8869+
8870+ "sete %1\n"
8871 : "+m" (v->counter), "=qm" (c)
8872 : : "memory");
8873 return c != 0;
8874@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8875 {
8876 unsigned char c;
8877
8878- asm volatile(LOCK_PREFIX "incl %0; sete %1"
8879+ asm volatile(LOCK_PREFIX "incl %0\n"
8880+
8881+#ifdef CONFIG_PAX_REFCOUNT
8882+ "jno 0f\n"
8883+ LOCK_PREFIX "decl %0\n"
8884+ "int $4\n0:\n"
8885+ _ASM_EXTABLE(0b, 0b)
8886+#endif
8887+
8888+ "sete %1\n"
8889+ : "+m" (v->counter), "=qm" (c)
8890+ : : "memory");
8891+ return c != 0;
8892+}
8893+
8894+/**
8895+ * atomic_inc_and_test_unchecked - increment and test
8896+ * @v: pointer of type atomic_unchecked_t
8897+ *
8898+ * Atomically increments @v by 1
8899+ * and returns true if the result is zero, or false for all
8900+ * other cases.
8901+ */
8902+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8903+{
8904+ unsigned char c;
8905+
8906+ asm volatile(LOCK_PREFIX "incl %0\n"
8907+ "sete %1\n"
8908 : "+m" (v->counter), "=qm" (c)
8909 : : "memory");
8910 return c != 0;
8911@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8912 {
8913 unsigned char c;
8914
8915- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8916+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
8917+
8918+#ifdef CONFIG_PAX_REFCOUNT
8919+ "jno 0f\n"
8920+ LOCK_PREFIX "subl %2,%0\n"
8921+ "int $4\n0:\n"
8922+ _ASM_EXTABLE(0b, 0b)
8923+#endif
8924+
8925+ "sets %1\n"
8926 : "+m" (v->counter), "=qm" (c)
8927 : "ir" (i) : "memory");
8928 return c;
8929@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
8930 goto no_xadd;
8931 #endif
8932 /* Modern 486+ processor */
8933- return i + xadd(&v->counter, i);
8934+ return i + xadd_check_overflow(&v->counter, i);
8935
8936 #ifdef CONFIG_M386
8937 no_xadd: /* Legacy 386 processor */
8938@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
8939 }
8940
8941 /**
8942+ * atomic_add_return_unchecked - add integer and return
8943+ * @i: integer value to add
8944+ * @v: pointer of type atomic_unchecked_t
8945+ *
8946+ * Atomically adds @i to @v and returns @i + @v
8947+ */
8948+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8949+{
8950+#ifdef CONFIG_M386
8951+ int __i;
8952+ unsigned long flags;
8953+ if (unlikely(boot_cpu_data.x86 <= 3))
8954+ goto no_xadd;
8955+#endif
8956+ /* Modern 486+ processor */
8957+ return i + xadd(&v->counter, i);
8958+
8959+#ifdef CONFIG_M386
8960+no_xadd: /* Legacy 386 processor */
8961+ raw_local_irq_save(flags);
8962+ __i = atomic_read_unchecked(v);
8963+ atomic_set_unchecked(v, i + __i);
8964+ raw_local_irq_restore(flags);
8965+ return i + __i;
8966+#endif
8967+}
8968+
8969+/**
8970 * atomic_sub_return - subtract integer and return
8971 * @v: pointer of type atomic_t
8972 * @i: integer value to subtract
8973@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
8974 }
8975
8976 #define atomic_inc_return(v) (atomic_add_return(1, v))
8977+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8978+{
8979+ return atomic_add_return_unchecked(1, v);
8980+}
8981 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8982
8983 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8984@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8985 return cmpxchg(&v->counter, old, new);
8986 }
8987
8988+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8989+{
8990+ return cmpxchg(&v->counter, old, new);
8991+}
8992+
8993 static inline int atomic_xchg(atomic_t *v, int new)
8994 {
8995 return xchg(&v->counter, new);
8996 }
8997
8998+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8999+{
9000+ return xchg(&v->counter, new);
9001+}
9002+
9003 /**
9004 * __atomic_add_unless - add unless the number is already a given value
9005 * @v: pointer of type atomic_t
9006@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9007 */
9008 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9009 {
9010- int c, old;
9011+ int c, old, new;
9012 c = atomic_read(v);
9013 for (;;) {
9014- if (unlikely(c == (u)))
9015+ if (unlikely(c == u))
9016 break;
9017- old = atomic_cmpxchg((v), c, c + (a));
9018+
9019+ asm volatile("addl %2,%0\n"
9020+
9021+#ifdef CONFIG_PAX_REFCOUNT
9022+ "jno 0f\n"
9023+ "subl %2,%0\n"
9024+ "int $4\n0:\n"
9025+ _ASM_EXTABLE(0b, 0b)
9026+#endif
9027+
9028+ : "=r" (new)
9029+ : "0" (c), "ir" (a));
9030+
9031+ old = atomic_cmpxchg(v, c, new);
9032 if (likely(old == c))
9033 break;
9034 c = old;
9035@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9036 return c;
9037 }
9038
9039+/**
9040+ * atomic_inc_not_zero_hint - increment if not null
9041+ * @v: pointer of type atomic_t
9042+ * @hint: probable value of the atomic before the increment
9043+ *
9044+ * This version of atomic_inc_not_zero() gives a hint of probable
9045+ * value of the atomic. This helps processor to not read the memory
9046+ * before doing the atomic read/modify/write cycle, lowering
9047+ * number of bus transactions on some arches.
9048+ *
9049+ * Returns: 0 if increment was not done, 1 otherwise.
9050+ */
9051+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9052+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9053+{
9054+ int val, c = hint, new;
9055+
9056+ /* sanity test, should be removed by compiler if hint is a constant */
9057+ if (!hint)
9058+ return __atomic_add_unless(v, 1, 0);
9059+
9060+ do {
9061+ asm volatile("incl %0\n"
9062+
9063+#ifdef CONFIG_PAX_REFCOUNT
9064+ "jno 0f\n"
9065+ "decl %0\n"
9066+ "int $4\n0:\n"
9067+ _ASM_EXTABLE(0b, 0b)
9068+#endif
9069+
9070+ : "=r" (new)
9071+ : "0" (c));
9072+
9073+ val = atomic_cmpxchg(v, c, new);
9074+ if (val == c)
9075+ return 1;
9076+ c = val;
9077+ } while (c);
9078+
9079+ return 0;
9080+}
9081
9082 /*
9083 * atomic_dec_if_positive - decrement by 1 if old value positive
9084diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9085index fa13f0e..27c2e08 100644
9086--- a/arch/x86/include/asm/atomic64_32.h
9087+++ b/arch/x86/include/asm/atomic64_32.h
9088@@ -12,6 +12,14 @@ typedef struct {
9089 u64 __aligned(8) counter;
9090 } atomic64_t;
9091
9092+#ifdef CONFIG_PAX_REFCOUNT
9093+typedef struct {
9094+ u64 __aligned(8) counter;
9095+} atomic64_unchecked_t;
9096+#else
9097+typedef atomic64_t atomic64_unchecked_t;
9098+#endif
9099+
9100 #define ATOMIC64_INIT(val) { (val) }
9101
9102 #ifdef CONFIG_X86_CMPXCHG64
9103@@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9104 }
9105
9106 /**
9107+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9108+ * @p: pointer to type atomic64_unchecked_t
9109+ * @o: expected value
9110+ * @n: new value
9111+ *
9112+ * Atomically sets @v to @n if it was equal to @o and returns
9113+ * the old value.
9114+ */
9115+
9116+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9117+{
9118+ return cmpxchg64(&v->counter, o, n);
9119+}
9120+
9121+/**
9122 * atomic64_xchg - xchg atomic64 variable
9123 * @v: pointer to type atomic64_t
9124 * @n: value to assign
9125@@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9126 }
9127
9128 /**
9129+ * atomic64_set_unchecked - set atomic64 variable
9130+ * @v: pointer to type atomic64_unchecked_t
9131+ * @n: value to assign
9132+ *
9133+ * Atomically sets the value of @v to @n.
9134+ */
9135+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9136+{
9137+ unsigned high = (unsigned)(i >> 32);
9138+ unsigned low = (unsigned)i;
9139+ asm volatile(ATOMIC64_ALTERNATIVE(set)
9140+ : "+b" (low), "+c" (high)
9141+ : "S" (v)
9142+ : "eax", "edx", "memory"
9143+ );
9144+}
9145+
9146+/**
9147 * atomic64_read - read atomic64 variable
9148 * @v: pointer to type atomic64_t
9149 *
9150@@ -93,6 +134,22 @@ static inline long long atomic64_read(const atomic64_t *v)
9151 }
9152
9153 /**
9154+ * atomic64_read_unchecked - read atomic64 variable
9155+ * @v: pointer to type atomic64_unchecked_t
9156+ *
9157+ * Atomically reads the value of @v and returns it.
9158+ */
9159+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9160+{
9161+ long long r;
9162+ asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
9163+ : "=A" (r), "+c" (v)
9164+ : : "memory"
9165+ );
9166+ return r;
9167+ }
9168+
9169+/**
9170 * atomic64_add_return - add and return
9171 * @i: integer value to add
9172 * @v: pointer to type atomic64_t
9173@@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9174 return i;
9175 }
9176
9177+/**
9178+ * atomic64_add_return_unchecked - add and return
9179+ * @i: integer value to add
9180+ * @v: pointer to type atomic64_unchecked_t
9181+ *
9182+ * Atomically adds @i to @v and returns @i + *@v
9183+ */
9184+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9185+{
9186+ asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
9187+ : "+A" (i), "+c" (v)
9188+ : : "memory"
9189+ );
9190+ return i;
9191+}
9192+
9193 /*
9194 * Other variants with different arithmetic operators:
9195 */
9196@@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9197 return a;
9198 }
9199
9200+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9201+{
9202+ long long a;
9203+ asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
9204+ : "=A" (a)
9205+ : "S" (v)
9206+ : "memory", "ecx"
9207+ );
9208+ return a;
9209+}
9210+
9211 static inline long long atomic64_dec_return(atomic64_t *v)
9212 {
9213 long long a;
9214@@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9215 }
9216
9217 /**
9218+ * atomic64_add_unchecked - add integer to atomic64 variable
9219+ * @i: integer value to add
9220+ * @v: pointer to type atomic64_unchecked_t
9221+ *
9222+ * Atomically adds @i to @v.
9223+ */
9224+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9225+{
9226+ asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
9227+ : "+A" (i), "+c" (v)
9228+ : : "memory"
9229+ );
9230+ return i;
9231+}
9232+
9233+/**
9234 * atomic64_sub - subtract the atomic64 variable
9235 * @i: integer value to subtract
9236 * @v: pointer to type atomic64_t
9237diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9238index 0e1cbfc..5623683 100644
9239--- a/arch/x86/include/asm/atomic64_64.h
9240+++ b/arch/x86/include/asm/atomic64_64.h
9241@@ -18,7 +18,19 @@
9242 */
9243 static inline long atomic64_read(const atomic64_t *v)
9244 {
9245- return (*(volatile long *)&(v)->counter);
9246+ return (*(volatile const long *)&(v)->counter);
9247+}
9248+
9249+/**
9250+ * atomic64_read_unchecked - read atomic64 variable
9251+ * @v: pointer of type atomic64_unchecked_t
9252+ *
9253+ * Atomically reads the value of @v.
9254+ * Doesn't imply a read memory barrier.
9255+ */
9256+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9257+{
9258+ return (*(volatile const long *)&(v)->counter);
9259 }
9260
9261 /**
9262@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9263 }
9264
9265 /**
9266+ * atomic64_set_unchecked - set atomic64 variable
9267+ * @v: pointer to type atomic64_unchecked_t
9268+ * @i: required value
9269+ *
9270+ * Atomically sets the value of @v to @i.
9271+ */
9272+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9273+{
9274+ v->counter = i;
9275+}
9276+
9277+/**
9278 * atomic64_add - add integer to atomic64 variable
9279 * @i: integer value to add
9280 * @v: pointer to type atomic64_t
9281@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9282 */
9283 static inline void atomic64_add(long i, atomic64_t *v)
9284 {
9285+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9286+
9287+#ifdef CONFIG_PAX_REFCOUNT
9288+ "jno 0f\n"
9289+ LOCK_PREFIX "subq %1,%0\n"
9290+ "int $4\n0:\n"
9291+ _ASM_EXTABLE(0b, 0b)
9292+#endif
9293+
9294+ : "=m" (v->counter)
9295+ : "er" (i), "m" (v->counter));
9296+}
9297+
9298+/**
9299+ * atomic64_add_unchecked - add integer to atomic64 variable
9300+ * @i: integer value to add
9301+ * @v: pointer to type atomic64_unchecked_t
9302+ *
9303+ * Atomically adds @i to @v.
9304+ */
9305+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9306+{
9307 asm volatile(LOCK_PREFIX "addq %1,%0"
9308 : "=m" (v->counter)
9309 : "er" (i), "m" (v->counter));
9310@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9311 */
9312 static inline void atomic64_sub(long i, atomic64_t *v)
9313 {
9314- asm volatile(LOCK_PREFIX "subq %1,%0"
9315+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9316+
9317+#ifdef CONFIG_PAX_REFCOUNT
9318+ "jno 0f\n"
9319+ LOCK_PREFIX "addq %1,%0\n"
9320+ "int $4\n0:\n"
9321+ _ASM_EXTABLE(0b, 0b)
9322+#endif
9323+
9324+ : "=m" (v->counter)
9325+ : "er" (i), "m" (v->counter));
9326+}
9327+
9328+/**
9329+ * atomic64_sub_unchecked - subtract the atomic64 variable
9330+ * @i: integer value to subtract
9331+ * @v: pointer to type atomic64_unchecked_t
9332+ *
9333+ * Atomically subtracts @i from @v.
9334+ */
9335+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9336+{
9337+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9338 : "=m" (v->counter)
9339 : "er" (i), "m" (v->counter));
9340 }
9341@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9342 {
9343 unsigned char c;
9344
9345- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9346+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9347+
9348+#ifdef CONFIG_PAX_REFCOUNT
9349+ "jno 0f\n"
9350+ LOCK_PREFIX "addq %2,%0\n"
9351+ "int $4\n0:\n"
9352+ _ASM_EXTABLE(0b, 0b)
9353+#endif
9354+
9355+ "sete %1\n"
9356 : "=m" (v->counter), "=qm" (c)
9357 : "er" (i), "m" (v->counter) : "memory");
9358 return c;
9359@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9360 */
9361 static inline void atomic64_inc(atomic64_t *v)
9362 {
9363+ asm volatile(LOCK_PREFIX "incq %0\n"
9364+
9365+#ifdef CONFIG_PAX_REFCOUNT
9366+ "jno 0f\n"
9367+ LOCK_PREFIX "decq %0\n"
9368+ "int $4\n0:\n"
9369+ _ASM_EXTABLE(0b, 0b)
9370+#endif
9371+
9372+ : "=m" (v->counter)
9373+ : "m" (v->counter));
9374+}
9375+
9376+/**
9377+ * atomic64_inc_unchecked - increment atomic64 variable
9378+ * @v: pointer to type atomic64_unchecked_t
9379+ *
9380+ * Atomically increments @v by 1.
9381+ */
9382+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9383+{
9384 asm volatile(LOCK_PREFIX "incq %0"
9385 : "=m" (v->counter)
9386 : "m" (v->counter));
9387@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9388 */
9389 static inline void atomic64_dec(atomic64_t *v)
9390 {
9391- asm volatile(LOCK_PREFIX "decq %0"
9392+ asm volatile(LOCK_PREFIX "decq %0\n"
9393+
9394+#ifdef CONFIG_PAX_REFCOUNT
9395+ "jno 0f\n"
9396+ LOCK_PREFIX "incq %0\n"
9397+ "int $4\n0:\n"
9398+ _ASM_EXTABLE(0b, 0b)
9399+#endif
9400+
9401+ : "=m" (v->counter)
9402+ : "m" (v->counter));
9403+}
9404+
9405+/**
9406+ * atomic64_dec_unchecked - decrement atomic64 variable
9407+ * @v: pointer to type atomic64_t
9408+ *
9409+ * Atomically decrements @v by 1.
9410+ */
9411+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9412+{
9413+ asm volatile(LOCK_PREFIX "decq %0\n"
9414 : "=m" (v->counter)
9415 : "m" (v->counter));
9416 }
9417@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9418 {
9419 unsigned char c;
9420
9421- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9422+ asm volatile(LOCK_PREFIX "decq %0\n"
9423+
9424+#ifdef CONFIG_PAX_REFCOUNT
9425+ "jno 0f\n"
9426+ LOCK_PREFIX "incq %0\n"
9427+ "int $4\n0:\n"
9428+ _ASM_EXTABLE(0b, 0b)
9429+#endif
9430+
9431+ "sete %1\n"
9432 : "=m" (v->counter), "=qm" (c)
9433 : "m" (v->counter) : "memory");
9434 return c != 0;
9435@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9436 {
9437 unsigned char c;
9438
9439- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9440+ asm volatile(LOCK_PREFIX "incq %0\n"
9441+
9442+#ifdef CONFIG_PAX_REFCOUNT
9443+ "jno 0f\n"
9444+ LOCK_PREFIX "decq %0\n"
9445+ "int $4\n0:\n"
9446+ _ASM_EXTABLE(0b, 0b)
9447+#endif
9448+
9449+ "sete %1\n"
9450 : "=m" (v->counter), "=qm" (c)
9451 : "m" (v->counter) : "memory");
9452 return c != 0;
9453@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9454 {
9455 unsigned char c;
9456
9457- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9458+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9459+
9460+#ifdef CONFIG_PAX_REFCOUNT
9461+ "jno 0f\n"
9462+ LOCK_PREFIX "subq %2,%0\n"
9463+ "int $4\n0:\n"
9464+ _ASM_EXTABLE(0b, 0b)
9465+#endif
9466+
9467+ "sets %1\n"
9468 : "=m" (v->counter), "=qm" (c)
9469 : "er" (i), "m" (v->counter) : "memory");
9470 return c;
9471@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9472 */
9473 static inline long atomic64_add_return(long i, atomic64_t *v)
9474 {
9475+ return i + xadd_check_overflow(&v->counter, i);
9476+}
9477+
9478+/**
9479+ * atomic64_add_return_unchecked - add and return
9480+ * @i: integer value to add
9481+ * @v: pointer to type atomic64_unchecked_t
9482+ *
9483+ * Atomically adds @i to @v and returns @i + @v
9484+ */
9485+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9486+{
9487 return i + xadd(&v->counter, i);
9488 }
9489
9490@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9491 }
9492
9493 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9494+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9495+{
9496+ return atomic64_add_return_unchecked(1, v);
9497+}
9498 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9499
9500 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9501@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9502 return cmpxchg(&v->counter, old, new);
9503 }
9504
9505+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9506+{
9507+ return cmpxchg(&v->counter, old, new);
9508+}
9509+
9510 static inline long atomic64_xchg(atomic64_t *v, long new)
9511 {
9512 return xchg(&v->counter, new);
9513@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
9514 */
9515 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9516 {
9517- long c, old;
9518+ long c, old, new;
9519 c = atomic64_read(v);
9520 for (;;) {
9521- if (unlikely(c == (u)))
9522+ if (unlikely(c == u))
9523 break;
9524- old = atomic64_cmpxchg((v), c, c + (a));
9525+
9526+ asm volatile("add %2,%0\n"
9527+
9528+#ifdef CONFIG_PAX_REFCOUNT
9529+ "jno 0f\n"
9530+ "sub %2,%0\n"
9531+ "int $4\n0:\n"
9532+ _ASM_EXTABLE(0b, 0b)
9533+#endif
9534+
9535+ : "=r" (new)
9536+ : "0" (c), "ir" (a));
9537+
9538+ old = atomic64_cmpxchg(v, c, new);
9539 if (likely(old == c))
9540 break;
9541 c = old;
9542 }
9543- return c != (u);
9544+ return c != u;
9545 }
9546
9547 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9548diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9549index b97596e..9bd48b06 100644
9550--- a/arch/x86/include/asm/bitops.h
9551+++ b/arch/x86/include/asm/bitops.h
9552@@ -38,7 +38,7 @@
9553 * a mask operation on a byte.
9554 */
9555 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9556-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9557+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9558 #define CONST_MASK(nr) (1 << ((nr) & 7))
9559
9560 /**
9561diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9562index 5e1a2ee..c9f9533 100644
9563--- a/arch/x86/include/asm/boot.h
9564+++ b/arch/x86/include/asm/boot.h
9565@@ -11,10 +11,15 @@
9566 #include <asm/pgtable_types.h>
9567
9568 /* Physical address where kernel should be loaded. */
9569-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9570+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9571 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9572 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9573
9574+#ifndef __ASSEMBLY__
9575+extern unsigned char __LOAD_PHYSICAL_ADDR[];
9576+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9577+#endif
9578+
9579 /* Minimum kernel alignment, as a power of two */
9580 #ifdef CONFIG_X86_64
9581 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9582diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9583index 48f99f1..d78ebf9 100644
9584--- a/arch/x86/include/asm/cache.h
9585+++ b/arch/x86/include/asm/cache.h
9586@@ -5,12 +5,13 @@
9587
9588 /* L1 cache line size */
9589 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9590-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9591+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9592
9593 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9594+#define __read_only __attribute__((__section__(".data..read_only")))
9595
9596 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
9597-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
9598+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
9599
9600 #ifdef CONFIG_X86_VSMP
9601 #ifdef CONFIG_SMP
9602diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9603index 4e12668..501d239 100644
9604--- a/arch/x86/include/asm/cacheflush.h
9605+++ b/arch/x86/include/asm/cacheflush.h
9606@@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
9607 unsigned long pg_flags = pg->flags & _PGMT_MASK;
9608
9609 if (pg_flags == _PGMT_DEFAULT)
9610- return -1;
9611+ return ~0UL;
9612 else if (pg_flags == _PGMT_WC)
9613 return _PAGE_CACHE_WC;
9614 else if (pg_flags == _PGMT_UC_MINUS)
9615diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9616index 46fc474..b02b0f9 100644
9617--- a/arch/x86/include/asm/checksum_32.h
9618+++ b/arch/x86/include/asm/checksum_32.h
9619@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9620 int len, __wsum sum,
9621 int *src_err_ptr, int *dst_err_ptr);
9622
9623+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9624+ int len, __wsum sum,
9625+ int *src_err_ptr, int *dst_err_ptr);
9626+
9627+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9628+ int len, __wsum sum,
9629+ int *src_err_ptr, int *dst_err_ptr);
9630+
9631 /*
9632 * Note: when you get a NULL pointer exception here this means someone
9633 * passed in an incorrect kernel address to one of these functions.
9634@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9635 int *err_ptr)
9636 {
9637 might_sleep();
9638- return csum_partial_copy_generic((__force void *)src, dst,
9639+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
9640 len, sum, err_ptr, NULL);
9641 }
9642
9643@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9644 {
9645 might_sleep();
9646 if (access_ok(VERIFY_WRITE, dst, len))
9647- return csum_partial_copy_generic(src, (__force void *)dst,
9648+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9649 len, sum, NULL, err_ptr);
9650
9651 if (len)
9652diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
9653index b3b7332..3935f40 100644
9654--- a/arch/x86/include/asm/cmpxchg.h
9655+++ b/arch/x86/include/asm/cmpxchg.h
9656@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
9657 __compiletime_error("Bad argument size for cmpxchg");
9658 extern void __xadd_wrong_size(void)
9659 __compiletime_error("Bad argument size for xadd");
9660+extern void __xadd_check_overflow_wrong_size(void)
9661+ __compiletime_error("Bad argument size for xadd_check_overflow");
9662 extern void __add_wrong_size(void)
9663 __compiletime_error("Bad argument size for add");
9664+extern void __add_check_overflow_wrong_size(void)
9665+ __compiletime_error("Bad argument size for add_check_overflow");
9666
9667 /*
9668 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
9669@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
9670 __ret; \
9671 })
9672
9673+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
9674+ ({ \
9675+ __typeof__ (*(ptr)) __ret = (arg); \
9676+ switch (sizeof(*(ptr))) { \
9677+ case __X86_CASE_L: \
9678+ asm volatile (lock #op "l %0, %1\n" \
9679+ "jno 0f\n" \
9680+ "mov %0,%1\n" \
9681+ "int $4\n0:\n" \
9682+ _ASM_EXTABLE(0b, 0b) \
9683+ : "+r" (__ret), "+m" (*(ptr)) \
9684+ : : "memory", "cc"); \
9685+ break; \
9686+ case __X86_CASE_Q: \
9687+ asm volatile (lock #op "q %q0, %1\n" \
9688+ "jno 0f\n" \
9689+ "mov %0,%1\n" \
9690+ "int $4\n0:\n" \
9691+ _ASM_EXTABLE(0b, 0b) \
9692+ : "+r" (__ret), "+m" (*(ptr)) \
9693+ : : "memory", "cc"); \
9694+ break; \
9695+ default: \
9696+ __ ## op ## _check_overflow_wrong_size(); \
9697+ } \
9698+ __ret; \
9699+ })
9700+
9701 /*
9702 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
9703 * Since this is generally used to protect other memory information, we
9704@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
9705 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
9706 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
9707
9708+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
9709+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
9710+
9711 #define __add(ptr, inc, lock) \
9712 ({ \
9713 __typeof__ (*(ptr)) __ret = (inc); \
9714diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
9715index 8d67d42..183d0eb 100644
9716--- a/arch/x86/include/asm/cpufeature.h
9717+++ b/arch/x86/include/asm/cpufeature.h
9718@@ -367,7 +367,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
9719 ".section .discard,\"aw\",@progbits\n"
9720 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
9721 ".previous\n"
9722- ".section .altinstr_replacement,\"ax\"\n"
9723+ ".section .altinstr_replacement,\"a\"\n"
9724 "3: movb $1,%0\n"
9725 "4:\n"
9726 ".previous\n"
9727diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9728index e95822d..a90010e 100644
9729--- a/arch/x86/include/asm/desc.h
9730+++ b/arch/x86/include/asm/desc.h
9731@@ -4,6 +4,7 @@
9732 #include <asm/desc_defs.h>
9733 #include <asm/ldt.h>
9734 #include <asm/mmu.h>
9735+#include <asm/pgtable.h>
9736
9737 #include <linux/smp.h>
9738
9739@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9740
9741 desc->type = (info->read_exec_only ^ 1) << 1;
9742 desc->type |= info->contents << 2;
9743+ desc->type |= info->seg_not_present ^ 1;
9744
9745 desc->s = 1;
9746 desc->dpl = 0x3;
9747@@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9748 }
9749
9750 extern struct desc_ptr idt_descr;
9751-extern gate_desc idt_table[];
9752 extern struct desc_ptr nmi_idt_descr;
9753-extern gate_desc nmi_idt_table[];
9754-
9755-struct gdt_page {
9756- struct desc_struct gdt[GDT_ENTRIES];
9757-} __attribute__((aligned(PAGE_SIZE)));
9758-
9759-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9760+extern gate_desc idt_table[256];
9761+extern gate_desc nmi_idt_table[256];
9762
9763+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9764 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9765 {
9766- return per_cpu(gdt_page, cpu).gdt;
9767+ return cpu_gdt_table[cpu];
9768 }
9769
9770 #ifdef CONFIG_X86_64
9771@@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9772 unsigned long base, unsigned dpl, unsigned flags,
9773 unsigned short seg)
9774 {
9775- gate->a = (seg << 16) | (base & 0xffff);
9776- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9777+ gate->gate.offset_low = base;
9778+ gate->gate.seg = seg;
9779+ gate->gate.reserved = 0;
9780+ gate->gate.type = type;
9781+ gate->gate.s = 0;
9782+ gate->gate.dpl = dpl;
9783+ gate->gate.p = 1;
9784+ gate->gate.offset_high = base >> 16;
9785 }
9786
9787 #endif
9788@@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9789
9790 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
9791 {
9792+ pax_open_kernel();
9793 memcpy(&idt[entry], gate, sizeof(*gate));
9794+ pax_close_kernel();
9795 }
9796
9797 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
9798 {
9799+ pax_open_kernel();
9800 memcpy(&ldt[entry], desc, 8);
9801+ pax_close_kernel();
9802 }
9803
9804 static inline void
9805@@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
9806 default: size = sizeof(*gdt); break;
9807 }
9808
9809+ pax_open_kernel();
9810 memcpy(&gdt[entry], desc, size);
9811+ pax_close_kernel();
9812 }
9813
9814 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9815@@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9816
9817 static inline void native_load_tr_desc(void)
9818 {
9819+ pax_open_kernel();
9820 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9821+ pax_close_kernel();
9822 }
9823
9824 static inline void native_load_gdt(const struct desc_ptr *dtr)
9825@@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9826 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9827 unsigned int i;
9828
9829+ pax_open_kernel();
9830 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9831 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9832+ pax_close_kernel();
9833 }
9834
9835 #define _LDT_empty(info) \
9836@@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9837 }
9838
9839 #ifdef CONFIG_X86_64
9840-static inline void set_nmi_gate(int gate, void *addr)
9841+static inline void set_nmi_gate(int gate, const void *addr)
9842 {
9843 gate_desc s;
9844
9845@@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
9846 }
9847 #endif
9848
9849-static inline void _set_gate(int gate, unsigned type, void *addr,
9850+static inline void _set_gate(int gate, unsigned type, const void *addr,
9851 unsigned dpl, unsigned ist, unsigned seg)
9852 {
9853 gate_desc s;
9854@@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9855 * Pentium F0 0F bugfix can have resulted in the mapped
9856 * IDT being write-protected.
9857 */
9858-static inline void set_intr_gate(unsigned int n, void *addr)
9859+static inline void set_intr_gate(unsigned int n, const void *addr)
9860 {
9861 BUG_ON((unsigned)n > 0xFF);
9862 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9863@@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9864 /*
9865 * This routine sets up an interrupt gate at directory privilege level 3.
9866 */
9867-static inline void set_system_intr_gate(unsigned int n, void *addr)
9868+static inline void set_system_intr_gate(unsigned int n, const void *addr)
9869 {
9870 BUG_ON((unsigned)n > 0xFF);
9871 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9872 }
9873
9874-static inline void set_system_trap_gate(unsigned int n, void *addr)
9875+static inline void set_system_trap_gate(unsigned int n, const void *addr)
9876 {
9877 BUG_ON((unsigned)n > 0xFF);
9878 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9879 }
9880
9881-static inline void set_trap_gate(unsigned int n, void *addr)
9882+static inline void set_trap_gate(unsigned int n, const void *addr)
9883 {
9884 BUG_ON((unsigned)n > 0xFF);
9885 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9886@@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9887 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9888 {
9889 BUG_ON((unsigned)n > 0xFF);
9890- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9891+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9892 }
9893
9894-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9895+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9896 {
9897 BUG_ON((unsigned)n > 0xFF);
9898 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9899 }
9900
9901-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9902+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9903 {
9904 BUG_ON((unsigned)n > 0xFF);
9905 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9906 }
9907
9908+#ifdef CONFIG_X86_32
9909+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9910+{
9911+ struct desc_struct d;
9912+
9913+ if (likely(limit))
9914+ limit = (limit - 1UL) >> PAGE_SHIFT;
9915+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
9916+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9917+}
9918+#endif
9919+
9920 #endif /* _ASM_X86_DESC_H */
9921diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9922index 278441f..b95a174 100644
9923--- a/arch/x86/include/asm/desc_defs.h
9924+++ b/arch/x86/include/asm/desc_defs.h
9925@@ -31,6 +31,12 @@ struct desc_struct {
9926 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9927 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9928 };
9929+ struct {
9930+ u16 offset_low;
9931+ u16 seg;
9932+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9933+ unsigned offset_high: 16;
9934+ } gate;
9935 };
9936 } __attribute__((packed));
9937
9938diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9939index 3778256..c5d4fce 100644
9940--- a/arch/x86/include/asm/e820.h
9941+++ b/arch/x86/include/asm/e820.h
9942@@ -69,7 +69,7 @@ struct e820map {
9943 #define ISA_START_ADDRESS 0xa0000
9944 #define ISA_END_ADDRESS 0x100000
9945
9946-#define BIOS_BEGIN 0x000a0000
9947+#define BIOS_BEGIN 0x000c0000
9948 #define BIOS_END 0x00100000
9949
9950 #define BIOS_ROM_BASE 0xffe00000
9951diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9952index 5f962df..7289f09 100644
9953--- a/arch/x86/include/asm/elf.h
9954+++ b/arch/x86/include/asm/elf.h
9955@@ -238,7 +238,25 @@ extern int force_personality32;
9956 the loader. We need to make sure that it is out of the way of the program
9957 that it will "exec", and that there is sufficient room for the brk. */
9958
9959+#ifdef CONFIG_PAX_SEGMEXEC
9960+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9961+#else
9962 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9963+#endif
9964+
9965+#ifdef CONFIG_PAX_ASLR
9966+#ifdef CONFIG_X86_32
9967+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9968+
9969+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9970+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9971+#else
9972+#define PAX_ELF_ET_DYN_BASE 0x400000UL
9973+
9974+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9975+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9976+#endif
9977+#endif
9978
9979 /* This yields a mask that user programs can use to figure out what
9980 instruction set this CPU supports. This could be done in user space,
9981@@ -291,9 +309,7 @@ do { \
9982
9983 #define ARCH_DLINFO \
9984 do { \
9985- if (vdso_enabled) \
9986- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
9987- (unsigned long)current->mm->context.vdso); \
9988+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
9989 } while (0)
9990
9991 #define AT_SYSINFO 32
9992@@ -304,7 +320,7 @@ do { \
9993
9994 #endif /* !CONFIG_X86_32 */
9995
9996-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
9997+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
9998
9999 #define VDSO_ENTRY \
10000 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10001@@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10002 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10003 #define compat_arch_setup_additional_pages syscall32_setup_pages
10004
10005-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10006-#define arch_randomize_brk arch_randomize_brk
10007-
10008 /*
10009 * True on X86_32 or when emulating IA32 on X86_64
10010 */
10011diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10012index cc70c1c..d96d011 100644
10013--- a/arch/x86/include/asm/emergency-restart.h
10014+++ b/arch/x86/include/asm/emergency-restart.h
10015@@ -15,6 +15,6 @@ enum reboot_type {
10016
10017 extern enum reboot_type reboot_type;
10018
10019-extern void machine_emergency_restart(void);
10020+extern void machine_emergency_restart(void) __noreturn;
10021
10022 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10023diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
10024index dbe82a5..c6d8a00 100644
10025--- a/arch/x86/include/asm/floppy.h
10026+++ b/arch/x86/include/asm/floppy.h
10027@@ -157,6 +157,7 @@ static unsigned long dma_mem_alloc(unsigned long size)
10028 }
10029
10030
10031+static unsigned long vdma_mem_alloc(unsigned long size) __size_overflow(1);
10032 static unsigned long vdma_mem_alloc(unsigned long size)
10033 {
10034 return (unsigned long)vmalloc(size);
10035diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10036index d09bb03..4ea4194 100644
10037--- a/arch/x86/include/asm/futex.h
10038+++ b/arch/x86/include/asm/futex.h
10039@@ -12,16 +12,18 @@
10040 #include <asm/system.h>
10041
10042 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10043+ typecheck(u32 __user *, uaddr); \
10044 asm volatile("1:\t" insn "\n" \
10045 "2:\t.section .fixup,\"ax\"\n" \
10046 "3:\tmov\t%3, %1\n" \
10047 "\tjmp\t2b\n" \
10048 "\t.previous\n" \
10049 _ASM_EXTABLE(1b, 3b) \
10050- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10051+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10052 : "i" (-EFAULT), "0" (oparg), "1" (0))
10053
10054 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10055+ typecheck(u32 __user *, uaddr); \
10056 asm volatile("1:\tmovl %2, %0\n" \
10057 "\tmovl\t%0, %3\n" \
10058 "\t" insn "\n" \
10059@@ -34,7 +36,7 @@
10060 _ASM_EXTABLE(1b, 4b) \
10061 _ASM_EXTABLE(2b, 4b) \
10062 : "=&a" (oldval), "=&r" (ret), \
10063- "+m" (*uaddr), "=&r" (tem) \
10064+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10065 : "r" (oparg), "i" (-EFAULT), "1" (0))
10066
10067 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10068@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10069
10070 switch (op) {
10071 case FUTEX_OP_SET:
10072- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10073+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10074 break;
10075 case FUTEX_OP_ADD:
10076- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10077+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10078 uaddr, oparg);
10079 break;
10080 case FUTEX_OP_OR:
10081@@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10082 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10083 return -EFAULT;
10084
10085- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10086+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10087 "2:\t.section .fixup, \"ax\"\n"
10088 "3:\tmov %3, %0\n"
10089 "\tjmp 2b\n"
10090 "\t.previous\n"
10091 _ASM_EXTABLE(1b, 3b)
10092- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10093+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10094 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10095 : "memory"
10096 );
10097diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10098index eb92a6e..b98b2f4 100644
10099--- a/arch/x86/include/asm/hw_irq.h
10100+++ b/arch/x86/include/asm/hw_irq.h
10101@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10102 extern void enable_IO_APIC(void);
10103
10104 /* Statistics */
10105-extern atomic_t irq_err_count;
10106-extern atomic_t irq_mis_count;
10107+extern atomic_unchecked_t irq_err_count;
10108+extern atomic_unchecked_t irq_mis_count;
10109
10110 /* EISA */
10111 extern void eisa_set_level_irq(unsigned int irq);
10112diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10113index 2479049..3fb9795 100644
10114--- a/arch/x86/include/asm/i387.h
10115+++ b/arch/x86/include/asm/i387.h
10116@@ -93,6 +93,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10117 {
10118 int err;
10119
10120+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10121+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10122+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10123+#endif
10124+
10125 /* See comment in fxsave() below. */
10126 #ifdef CONFIG_AS_FXSAVEQ
10127 asm volatile("1: fxrstorq %[fx]\n\t"
10128@@ -122,6 +127,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10129 {
10130 int err;
10131
10132+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10133+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10134+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10135+#endif
10136+
10137 /*
10138 * Clear the bytes not touched by the fxsave and reserved
10139 * for the SW usage.
10140@@ -278,7 +288,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10141 "emms\n\t" /* clear stack tags */
10142 "fildl %P[addr]", /* set F?P to defined value */
10143 X86_FEATURE_FXSAVE_LEAK,
10144- [addr] "m" (tsk->thread.fpu.has_fpu));
10145+ [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10146
10147 return fpu_restore_checking(&tsk->thread.fpu);
10148 }
10149@@ -445,7 +455,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
10150 static inline bool interrupted_user_mode(void)
10151 {
10152 struct pt_regs *regs = get_irq_regs();
10153- return regs && user_mode_vm(regs);
10154+ return regs && user_mode(regs);
10155 }
10156
10157 /*
10158diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10159index d8e8eef..99f81ae 100644
10160--- a/arch/x86/include/asm/io.h
10161+++ b/arch/x86/include/asm/io.h
10162@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10163
10164 #include <linux/vmalloc.h>
10165
10166+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10167+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10168+{
10169+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10170+}
10171+
10172+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10173+{
10174+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10175+}
10176+
10177 /*
10178 * Convert a virtual cached pointer to an uncached pointer
10179 */
10180diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10181index bba3cf8..06bc8da 100644
10182--- a/arch/x86/include/asm/irqflags.h
10183+++ b/arch/x86/include/asm/irqflags.h
10184@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10185 sti; \
10186 sysexit
10187
10188+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10189+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10190+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10191+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10192+
10193 #else
10194 #define INTERRUPT_RETURN iret
10195 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10196diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10197index 5478825..839e88c 100644
10198--- a/arch/x86/include/asm/kprobes.h
10199+++ b/arch/x86/include/asm/kprobes.h
10200@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10201 #define RELATIVEJUMP_SIZE 5
10202 #define RELATIVECALL_OPCODE 0xe8
10203 #define RELATIVE_ADDR_SIZE 4
10204-#define MAX_STACK_SIZE 64
10205-#define MIN_STACK_SIZE(ADDR) \
10206- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10207- THREAD_SIZE - (unsigned long)(ADDR))) \
10208- ? (MAX_STACK_SIZE) \
10209- : (((unsigned long)current_thread_info()) + \
10210- THREAD_SIZE - (unsigned long)(ADDR)))
10211+#define MAX_STACK_SIZE 64UL
10212+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10213
10214 #define flush_insn_slot(p) do { } while (0)
10215
10216diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10217index 52d6640..3d2c938 100644
10218--- a/arch/x86/include/asm/kvm_host.h
10219+++ b/arch/x86/include/asm/kvm_host.h
10220@@ -663,7 +663,7 @@ struct kvm_x86_ops {
10221 int (*check_intercept)(struct kvm_vcpu *vcpu,
10222 struct x86_instruction_info *info,
10223 enum x86_intercept_stage stage);
10224-};
10225+} __do_const;
10226
10227 struct kvm_arch_async_pf {
10228 u32 token;
10229@@ -694,7 +694,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
10230 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
10231
10232 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
10233- const void *val, int bytes);
10234+ const void *val, int bytes) __size_overflow(2);
10235 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
10236
10237 extern bool tdp_enabled;
10238@@ -756,7 +756,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
10239 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
10240
10241 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
10242-int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
10243+int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) __size_overflow(3);
10244
10245 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
10246 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
10247@@ -781,7 +781,7 @@ int fx_init(struct kvm_vcpu *vcpu);
10248
10249 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
10250 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
10251- const u8 *new, int bytes);
10252+ const u8 *new, int bytes) __size_overflow(2);
10253 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
10254 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
10255 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
10256diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10257index 9cdae5d..300d20f 100644
10258--- a/arch/x86/include/asm/local.h
10259+++ b/arch/x86/include/asm/local.h
10260@@ -18,26 +18,58 @@ typedef struct {
10261
10262 static inline void local_inc(local_t *l)
10263 {
10264- asm volatile(_ASM_INC "%0"
10265+ asm volatile(_ASM_INC "%0\n"
10266+
10267+#ifdef CONFIG_PAX_REFCOUNT
10268+ "jno 0f\n"
10269+ _ASM_DEC "%0\n"
10270+ "int $4\n0:\n"
10271+ _ASM_EXTABLE(0b, 0b)
10272+#endif
10273+
10274 : "+m" (l->a.counter));
10275 }
10276
10277 static inline void local_dec(local_t *l)
10278 {
10279- asm volatile(_ASM_DEC "%0"
10280+ asm volatile(_ASM_DEC "%0\n"
10281+
10282+#ifdef CONFIG_PAX_REFCOUNT
10283+ "jno 0f\n"
10284+ _ASM_INC "%0\n"
10285+ "int $4\n0:\n"
10286+ _ASM_EXTABLE(0b, 0b)
10287+#endif
10288+
10289 : "+m" (l->a.counter));
10290 }
10291
10292 static inline void local_add(long i, local_t *l)
10293 {
10294- asm volatile(_ASM_ADD "%1,%0"
10295+ asm volatile(_ASM_ADD "%1,%0\n"
10296+
10297+#ifdef CONFIG_PAX_REFCOUNT
10298+ "jno 0f\n"
10299+ _ASM_SUB "%1,%0\n"
10300+ "int $4\n0:\n"
10301+ _ASM_EXTABLE(0b, 0b)
10302+#endif
10303+
10304 : "+m" (l->a.counter)
10305 : "ir" (i));
10306 }
10307
10308 static inline void local_sub(long i, local_t *l)
10309 {
10310- asm volatile(_ASM_SUB "%1,%0"
10311+ asm volatile(_ASM_SUB "%1,%0\n"
10312+
10313+#ifdef CONFIG_PAX_REFCOUNT
10314+ "jno 0f\n"
10315+ _ASM_ADD "%1,%0\n"
10316+ "int $4\n0:\n"
10317+ _ASM_EXTABLE(0b, 0b)
10318+#endif
10319+
10320 : "+m" (l->a.counter)
10321 : "ir" (i));
10322 }
10323@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10324 {
10325 unsigned char c;
10326
10327- asm volatile(_ASM_SUB "%2,%0; sete %1"
10328+ asm volatile(_ASM_SUB "%2,%0\n"
10329+
10330+#ifdef CONFIG_PAX_REFCOUNT
10331+ "jno 0f\n"
10332+ _ASM_ADD "%2,%0\n"
10333+ "int $4\n0:\n"
10334+ _ASM_EXTABLE(0b, 0b)
10335+#endif
10336+
10337+ "sete %1\n"
10338 : "+m" (l->a.counter), "=qm" (c)
10339 : "ir" (i) : "memory");
10340 return c;
10341@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10342 {
10343 unsigned char c;
10344
10345- asm volatile(_ASM_DEC "%0; sete %1"
10346+ asm volatile(_ASM_DEC "%0\n"
10347+
10348+#ifdef CONFIG_PAX_REFCOUNT
10349+ "jno 0f\n"
10350+ _ASM_INC "%0\n"
10351+ "int $4\n0:\n"
10352+ _ASM_EXTABLE(0b, 0b)
10353+#endif
10354+
10355+ "sete %1\n"
10356 : "+m" (l->a.counter), "=qm" (c)
10357 : : "memory");
10358 return c != 0;
10359@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10360 {
10361 unsigned char c;
10362
10363- asm volatile(_ASM_INC "%0; sete %1"
10364+ asm volatile(_ASM_INC "%0\n"
10365+
10366+#ifdef CONFIG_PAX_REFCOUNT
10367+ "jno 0f\n"
10368+ _ASM_DEC "%0\n"
10369+ "int $4\n0:\n"
10370+ _ASM_EXTABLE(0b, 0b)
10371+#endif
10372+
10373+ "sete %1\n"
10374 : "+m" (l->a.counter), "=qm" (c)
10375 : : "memory");
10376 return c != 0;
10377@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10378 {
10379 unsigned char c;
10380
10381- asm volatile(_ASM_ADD "%2,%0; sets %1"
10382+ asm volatile(_ASM_ADD "%2,%0\n"
10383+
10384+#ifdef CONFIG_PAX_REFCOUNT
10385+ "jno 0f\n"
10386+ _ASM_SUB "%2,%0\n"
10387+ "int $4\n0:\n"
10388+ _ASM_EXTABLE(0b, 0b)
10389+#endif
10390+
10391+ "sets %1\n"
10392 : "+m" (l->a.counter), "=qm" (c)
10393 : "ir" (i) : "memory");
10394 return c;
10395@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10396 #endif
10397 /* Modern 486+ processor */
10398 __i = i;
10399- asm volatile(_ASM_XADD "%0, %1;"
10400+ asm volatile(_ASM_XADD "%0, %1\n"
10401+
10402+#ifdef CONFIG_PAX_REFCOUNT
10403+ "jno 0f\n"
10404+ _ASM_MOV "%0,%1\n"
10405+ "int $4\n0:\n"
10406+ _ASM_EXTABLE(0b, 0b)
10407+#endif
10408+
10409 : "+r" (i), "+m" (l->a.counter)
10410 : : "memory");
10411 return i + __i;
10412diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10413index 593e51d..fa69c9a 100644
10414--- a/arch/x86/include/asm/mman.h
10415+++ b/arch/x86/include/asm/mman.h
10416@@ -5,4 +5,14 @@
10417
10418 #include <asm-generic/mman.h>
10419
10420+#ifdef __KERNEL__
10421+#ifndef __ASSEMBLY__
10422+#ifdef CONFIG_X86_32
10423+#define arch_mmap_check i386_mmap_check
10424+int i386_mmap_check(unsigned long addr, unsigned long len,
10425+ unsigned long flags);
10426+#endif
10427+#endif
10428+#endif
10429+
10430 #endif /* _ASM_X86_MMAN_H */
10431diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10432index 5f55e69..e20bfb1 100644
10433--- a/arch/x86/include/asm/mmu.h
10434+++ b/arch/x86/include/asm/mmu.h
10435@@ -9,7 +9,7 @@
10436 * we put the segment information here.
10437 */
10438 typedef struct {
10439- void *ldt;
10440+ struct desc_struct *ldt;
10441 int size;
10442
10443 #ifdef CONFIG_X86_64
10444@@ -18,7 +18,19 @@ typedef struct {
10445 #endif
10446
10447 struct mutex lock;
10448- void *vdso;
10449+ unsigned long vdso;
10450+
10451+#ifdef CONFIG_X86_32
10452+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10453+ unsigned long user_cs_base;
10454+ unsigned long user_cs_limit;
10455+
10456+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10457+ cpumask_t cpu_user_cs_mask;
10458+#endif
10459+
10460+#endif
10461+#endif
10462 } mm_context_t;
10463
10464 #ifdef CONFIG_SMP
10465diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10466index 6902152..399f3a2 100644
10467--- a/arch/x86/include/asm/mmu_context.h
10468+++ b/arch/x86/include/asm/mmu_context.h
10469@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10470
10471 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10472 {
10473+
10474+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10475+ unsigned int i;
10476+ pgd_t *pgd;
10477+
10478+ pax_open_kernel();
10479+ pgd = get_cpu_pgd(smp_processor_id());
10480+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10481+ set_pgd_batched(pgd+i, native_make_pgd(0));
10482+ pax_close_kernel();
10483+#endif
10484+
10485 #ifdef CONFIG_SMP
10486 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10487 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10488@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10489 struct task_struct *tsk)
10490 {
10491 unsigned cpu = smp_processor_id();
10492+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10493+ int tlbstate = TLBSTATE_OK;
10494+#endif
10495
10496 if (likely(prev != next)) {
10497 #ifdef CONFIG_SMP
10498+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10499+ tlbstate = percpu_read(cpu_tlbstate.state);
10500+#endif
10501 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10502 percpu_write(cpu_tlbstate.active_mm, next);
10503 #endif
10504 cpumask_set_cpu(cpu, mm_cpumask(next));
10505
10506 /* Re-load page tables */
10507+#ifdef CONFIG_PAX_PER_CPU_PGD
10508+ pax_open_kernel();
10509+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10510+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10511+ pax_close_kernel();
10512+ load_cr3(get_cpu_pgd(cpu));
10513+#else
10514 load_cr3(next->pgd);
10515+#endif
10516
10517 /* stop flush ipis for the previous mm */
10518 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10519@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10520 */
10521 if (unlikely(prev->context.ldt != next->context.ldt))
10522 load_LDT_nolock(&next->context);
10523- }
10524+
10525+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10526+ if (!(__supported_pte_mask & _PAGE_NX)) {
10527+ smp_mb__before_clear_bit();
10528+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10529+ smp_mb__after_clear_bit();
10530+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10531+ }
10532+#endif
10533+
10534+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10535+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10536+ prev->context.user_cs_limit != next->context.user_cs_limit))
10537+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10538 #ifdef CONFIG_SMP
10539+ else if (unlikely(tlbstate != TLBSTATE_OK))
10540+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10541+#endif
10542+#endif
10543+
10544+ }
10545 else {
10546+
10547+#ifdef CONFIG_PAX_PER_CPU_PGD
10548+ pax_open_kernel();
10549+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10550+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10551+ pax_close_kernel();
10552+ load_cr3(get_cpu_pgd(cpu));
10553+#endif
10554+
10555+#ifdef CONFIG_SMP
10556 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10557 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10558
10559@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10560 * tlb flush IPI delivery. We must reload CR3
10561 * to make sure to use no freed page tables.
10562 */
10563+
10564+#ifndef CONFIG_PAX_PER_CPU_PGD
10565 load_cr3(next->pgd);
10566+#endif
10567+
10568 load_LDT_nolock(&next->context);
10569+
10570+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10571+ if (!(__supported_pte_mask & _PAGE_NX))
10572+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10573+#endif
10574+
10575+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10576+#ifdef CONFIG_PAX_PAGEEXEC
10577+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
10578+#endif
10579+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10580+#endif
10581+
10582 }
10583+#endif
10584 }
10585-#endif
10586 }
10587
10588 #define activate_mm(prev, next) \
10589diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10590index 9eae775..c914fea 100644
10591--- a/arch/x86/include/asm/module.h
10592+++ b/arch/x86/include/asm/module.h
10593@@ -5,6 +5,7 @@
10594
10595 #ifdef CONFIG_X86_64
10596 /* X86_64 does not define MODULE_PROC_FAMILY */
10597+#define MODULE_PROC_FAMILY ""
10598 #elif defined CONFIG_M386
10599 #define MODULE_PROC_FAMILY "386 "
10600 #elif defined CONFIG_M486
10601@@ -59,8 +60,20 @@
10602 #error unknown processor family
10603 #endif
10604
10605-#ifdef CONFIG_X86_32
10606-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
10607+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10608+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10609+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10610+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10611+#else
10612+#define MODULE_PAX_KERNEXEC ""
10613 #endif
10614
10615+#ifdef CONFIG_PAX_MEMORY_UDEREF
10616+#define MODULE_PAX_UDEREF "UDEREF "
10617+#else
10618+#define MODULE_PAX_UDEREF ""
10619+#endif
10620+
10621+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10622+
10623 #endif /* _ASM_X86_MODULE_H */
10624diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10625index 7639dbf..e08a58c 100644
10626--- a/arch/x86/include/asm/page_64_types.h
10627+++ b/arch/x86/include/asm/page_64_types.h
10628@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10629
10630 /* duplicated to the one in bootmem.h */
10631 extern unsigned long max_pfn;
10632-extern unsigned long phys_base;
10633+extern const unsigned long phys_base;
10634
10635 extern unsigned long __phys_addr(unsigned long);
10636 #define __phys_reloc_hide(x) (x)
10637diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10638index a7d2db9..edb023e 100644
10639--- a/arch/x86/include/asm/paravirt.h
10640+++ b/arch/x86/include/asm/paravirt.h
10641@@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10642 val);
10643 }
10644
10645+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10646+{
10647+ pgdval_t val = native_pgd_val(pgd);
10648+
10649+ if (sizeof(pgdval_t) > sizeof(long))
10650+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10651+ val, (u64)val >> 32);
10652+ else
10653+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10654+ val);
10655+}
10656+
10657 static inline void pgd_clear(pgd_t *pgdp)
10658 {
10659 set_pgd(pgdp, __pgd(0));
10660@@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10661 pv_mmu_ops.set_fixmap(idx, phys, flags);
10662 }
10663
10664+#ifdef CONFIG_PAX_KERNEXEC
10665+static inline unsigned long pax_open_kernel(void)
10666+{
10667+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10668+}
10669+
10670+static inline unsigned long pax_close_kernel(void)
10671+{
10672+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10673+}
10674+#else
10675+static inline unsigned long pax_open_kernel(void) { return 0; }
10676+static inline unsigned long pax_close_kernel(void) { return 0; }
10677+#endif
10678+
10679 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10680
10681 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
10682@@ -964,7 +991,7 @@ extern void default_banner(void);
10683
10684 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10685 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10686-#define PARA_INDIRECT(addr) *%cs:addr
10687+#define PARA_INDIRECT(addr) *%ss:addr
10688 #endif
10689
10690 #define INTERRUPT_RETURN \
10691@@ -1041,6 +1068,21 @@ extern void default_banner(void);
10692 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10693 CLBR_NONE, \
10694 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10695+
10696+#define GET_CR0_INTO_RDI \
10697+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10698+ mov %rax,%rdi
10699+
10700+#define SET_RDI_INTO_CR0 \
10701+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10702+
10703+#define GET_CR3_INTO_RDI \
10704+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10705+ mov %rax,%rdi
10706+
10707+#define SET_RDI_INTO_CR3 \
10708+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10709+
10710 #endif /* CONFIG_X86_32 */
10711
10712 #endif /* __ASSEMBLY__ */
10713diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10714index 8e8b9a4..f07d725 100644
10715--- a/arch/x86/include/asm/paravirt_types.h
10716+++ b/arch/x86/include/asm/paravirt_types.h
10717@@ -84,20 +84,20 @@ struct pv_init_ops {
10718 */
10719 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10720 unsigned long addr, unsigned len);
10721-};
10722+} __no_const;
10723
10724
10725 struct pv_lazy_ops {
10726 /* Set deferred update mode, used for batching operations. */
10727 void (*enter)(void);
10728 void (*leave)(void);
10729-};
10730+} __no_const;
10731
10732 struct pv_time_ops {
10733 unsigned long long (*sched_clock)(void);
10734 unsigned long long (*steal_clock)(int cpu);
10735 unsigned long (*get_tsc_khz)(void);
10736-};
10737+} __no_const;
10738
10739 struct pv_cpu_ops {
10740 /* hooks for various privileged instructions */
10741@@ -193,7 +193,7 @@ struct pv_cpu_ops {
10742
10743 void (*start_context_switch)(struct task_struct *prev);
10744 void (*end_context_switch)(struct task_struct *next);
10745-};
10746+} __no_const;
10747
10748 struct pv_irq_ops {
10749 /*
10750@@ -224,7 +224,7 @@ struct pv_apic_ops {
10751 unsigned long start_eip,
10752 unsigned long start_esp);
10753 #endif
10754-};
10755+} __no_const;
10756
10757 struct pv_mmu_ops {
10758 unsigned long (*read_cr2)(void);
10759@@ -313,6 +313,7 @@ struct pv_mmu_ops {
10760 struct paravirt_callee_save make_pud;
10761
10762 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10763+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10764 #endif /* PAGETABLE_LEVELS == 4 */
10765 #endif /* PAGETABLE_LEVELS >= 3 */
10766
10767@@ -324,6 +325,12 @@ struct pv_mmu_ops {
10768 an mfn. We can tell which is which from the index. */
10769 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10770 phys_addr_t phys, pgprot_t flags);
10771+
10772+#ifdef CONFIG_PAX_KERNEXEC
10773+ unsigned long (*pax_open_kernel)(void);
10774+ unsigned long (*pax_close_kernel)(void);
10775+#endif
10776+
10777 };
10778
10779 struct arch_spinlock;
10780@@ -334,7 +341,7 @@ struct pv_lock_ops {
10781 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
10782 int (*spin_trylock)(struct arch_spinlock *lock);
10783 void (*spin_unlock)(struct arch_spinlock *lock);
10784-};
10785+} __no_const;
10786
10787 /* This contains all the paravirt structures: we get a convenient
10788 * number for each function using the offset which we use to indicate
10789diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10790index b4389a4..b7ff22c 100644
10791--- a/arch/x86/include/asm/pgalloc.h
10792+++ b/arch/x86/include/asm/pgalloc.h
10793@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10794 pmd_t *pmd, pte_t *pte)
10795 {
10796 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10797+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10798+}
10799+
10800+static inline void pmd_populate_user(struct mm_struct *mm,
10801+ pmd_t *pmd, pte_t *pte)
10802+{
10803+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10804 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10805 }
10806
10807diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10808index 98391db..8f6984e 100644
10809--- a/arch/x86/include/asm/pgtable-2level.h
10810+++ b/arch/x86/include/asm/pgtable-2level.h
10811@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10812
10813 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10814 {
10815+ pax_open_kernel();
10816 *pmdp = pmd;
10817+ pax_close_kernel();
10818 }
10819
10820 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10821diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10822index effff47..f9e4035 100644
10823--- a/arch/x86/include/asm/pgtable-3level.h
10824+++ b/arch/x86/include/asm/pgtable-3level.h
10825@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10826
10827 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10828 {
10829+ pax_open_kernel();
10830 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10831+ pax_close_kernel();
10832 }
10833
10834 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10835 {
10836+ pax_open_kernel();
10837 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10838+ pax_close_kernel();
10839 }
10840
10841 /*
10842diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10843index 49afb3f..ed14d07 100644
10844--- a/arch/x86/include/asm/pgtable.h
10845+++ b/arch/x86/include/asm/pgtable.h
10846@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10847
10848 #ifndef __PAGETABLE_PUD_FOLDED
10849 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10850+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10851 #define pgd_clear(pgd) native_pgd_clear(pgd)
10852 #endif
10853
10854@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
10855
10856 #define arch_end_context_switch(prev) do {} while(0)
10857
10858+#define pax_open_kernel() native_pax_open_kernel()
10859+#define pax_close_kernel() native_pax_close_kernel()
10860 #endif /* CONFIG_PARAVIRT */
10861
10862+#define __HAVE_ARCH_PAX_OPEN_KERNEL
10863+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10864+
10865+#ifdef CONFIG_PAX_KERNEXEC
10866+static inline unsigned long native_pax_open_kernel(void)
10867+{
10868+ unsigned long cr0;
10869+
10870+ preempt_disable();
10871+ barrier();
10872+ cr0 = read_cr0() ^ X86_CR0_WP;
10873+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
10874+ write_cr0(cr0);
10875+ return cr0 ^ X86_CR0_WP;
10876+}
10877+
10878+static inline unsigned long native_pax_close_kernel(void)
10879+{
10880+ unsigned long cr0;
10881+
10882+ cr0 = read_cr0() ^ X86_CR0_WP;
10883+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
10884+ write_cr0(cr0);
10885+ barrier();
10886+ preempt_enable_no_resched();
10887+ return cr0 ^ X86_CR0_WP;
10888+}
10889+#else
10890+static inline unsigned long native_pax_open_kernel(void) { return 0; }
10891+static inline unsigned long native_pax_close_kernel(void) { return 0; }
10892+#endif
10893+
10894 /*
10895 * The following only work if pte_present() is true.
10896 * Undefined behaviour if not..
10897 */
10898+static inline int pte_user(pte_t pte)
10899+{
10900+ return pte_val(pte) & _PAGE_USER;
10901+}
10902+
10903 static inline int pte_dirty(pte_t pte)
10904 {
10905 return pte_flags(pte) & _PAGE_DIRTY;
10906@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
10907 return pte_clear_flags(pte, _PAGE_RW);
10908 }
10909
10910+static inline pte_t pte_mkread(pte_t pte)
10911+{
10912+ return __pte(pte_val(pte) | _PAGE_USER);
10913+}
10914+
10915 static inline pte_t pte_mkexec(pte_t pte)
10916 {
10917- return pte_clear_flags(pte, _PAGE_NX);
10918+#ifdef CONFIG_X86_PAE
10919+ if (__supported_pte_mask & _PAGE_NX)
10920+ return pte_clear_flags(pte, _PAGE_NX);
10921+ else
10922+#endif
10923+ return pte_set_flags(pte, _PAGE_USER);
10924+}
10925+
10926+static inline pte_t pte_exprotect(pte_t pte)
10927+{
10928+#ifdef CONFIG_X86_PAE
10929+ if (__supported_pte_mask & _PAGE_NX)
10930+ return pte_set_flags(pte, _PAGE_NX);
10931+ else
10932+#endif
10933+ return pte_clear_flags(pte, _PAGE_USER);
10934 }
10935
10936 static inline pte_t pte_mkdirty(pte_t pte)
10937@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
10938 #endif
10939
10940 #ifndef __ASSEMBLY__
10941+
10942+#ifdef CONFIG_PAX_PER_CPU_PGD
10943+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
10944+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
10945+{
10946+ return cpu_pgd[cpu];
10947+}
10948+#endif
10949+
10950 #include <linux/mm_types.h>
10951
10952 static inline int pte_none(pte_t pte)
10953@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
10954
10955 static inline int pgd_bad(pgd_t pgd)
10956 {
10957- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
10958+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
10959 }
10960
10961 static inline int pgd_none(pgd_t pgd)
10962@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
10963 * pgd_offset() returns a (pgd_t *)
10964 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
10965 */
10966-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
10967+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
10968+
10969+#ifdef CONFIG_PAX_PER_CPU_PGD
10970+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
10971+#endif
10972+
10973 /*
10974 * a shortcut which implies the use of the kernel's pgd, instead
10975 * of a process's
10976@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
10977 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
10978 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
10979
10980+#ifdef CONFIG_X86_32
10981+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
10982+#else
10983+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
10984+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
10985+
10986+#ifdef CONFIG_PAX_MEMORY_UDEREF
10987+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
10988+#else
10989+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
10990+#endif
10991+
10992+#endif
10993+
10994 #ifndef __ASSEMBLY__
10995
10996 extern int direct_gbpages;
10997@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
10998 * dst and src can be on the same page, but the range must not overlap,
10999 * and must not cross a page boundary.
11000 */
11001-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11002+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11003 {
11004- memcpy(dst, src, count * sizeof(pgd_t));
11005+ pax_open_kernel();
11006+ while (count--)
11007+ *dst++ = *src++;
11008+ pax_close_kernel();
11009 }
11010
11011+#ifdef CONFIG_PAX_PER_CPU_PGD
11012+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11013+#endif
11014+
11015+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11016+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11017+#else
11018+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11019+#endif
11020
11021 #include <asm-generic/pgtable.h>
11022 #endif /* __ASSEMBLY__ */
11023diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11024index 0c92113..34a77c6 100644
11025--- a/arch/x86/include/asm/pgtable_32.h
11026+++ b/arch/x86/include/asm/pgtable_32.h
11027@@ -25,9 +25,6 @@
11028 struct mm_struct;
11029 struct vm_area_struct;
11030
11031-extern pgd_t swapper_pg_dir[1024];
11032-extern pgd_t initial_page_table[1024];
11033-
11034 static inline void pgtable_cache_init(void) { }
11035 static inline void check_pgt_cache(void) { }
11036 void paging_init(void);
11037@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11038 # include <asm/pgtable-2level.h>
11039 #endif
11040
11041+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11042+extern pgd_t initial_page_table[PTRS_PER_PGD];
11043+#ifdef CONFIG_X86_PAE
11044+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11045+#endif
11046+
11047 #if defined(CONFIG_HIGHPTE)
11048 #define pte_offset_map(dir, address) \
11049 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11050@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11051 /* Clear a kernel PTE and flush it from the TLB */
11052 #define kpte_clear_flush(ptep, vaddr) \
11053 do { \
11054+ pax_open_kernel(); \
11055 pte_clear(&init_mm, (vaddr), (ptep)); \
11056+ pax_close_kernel(); \
11057 __flush_tlb_one((vaddr)); \
11058 } while (0)
11059
11060@@ -74,6 +79,9 @@ do { \
11061
11062 #endif /* !__ASSEMBLY__ */
11063
11064+#define HAVE_ARCH_UNMAPPED_AREA
11065+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11066+
11067 /*
11068 * kern_addr_valid() is (1) for FLATMEM and (0) for
11069 * SPARSEMEM and DISCONTIGMEM
11070diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11071index ed5903b..c7fe163 100644
11072--- a/arch/x86/include/asm/pgtable_32_types.h
11073+++ b/arch/x86/include/asm/pgtable_32_types.h
11074@@ -8,7 +8,7 @@
11075 */
11076 #ifdef CONFIG_X86_PAE
11077 # include <asm/pgtable-3level_types.h>
11078-# define PMD_SIZE (1UL << PMD_SHIFT)
11079+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11080 # define PMD_MASK (~(PMD_SIZE - 1))
11081 #else
11082 # include <asm/pgtable-2level_types.h>
11083@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11084 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11085 #endif
11086
11087+#ifdef CONFIG_PAX_KERNEXEC
11088+#ifndef __ASSEMBLY__
11089+extern unsigned char MODULES_EXEC_VADDR[];
11090+extern unsigned char MODULES_EXEC_END[];
11091+#endif
11092+#include <asm/boot.h>
11093+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11094+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11095+#else
11096+#define ktla_ktva(addr) (addr)
11097+#define ktva_ktla(addr) (addr)
11098+#endif
11099+
11100 #define MODULES_VADDR VMALLOC_START
11101 #define MODULES_END VMALLOC_END
11102 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11103diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11104index 975f709..107976d 100644
11105--- a/arch/x86/include/asm/pgtable_64.h
11106+++ b/arch/x86/include/asm/pgtable_64.h
11107@@ -16,10 +16,14 @@
11108
11109 extern pud_t level3_kernel_pgt[512];
11110 extern pud_t level3_ident_pgt[512];
11111+extern pud_t level3_vmalloc_start_pgt[512];
11112+extern pud_t level3_vmalloc_end_pgt[512];
11113+extern pud_t level3_vmemmap_pgt[512];
11114+extern pud_t level2_vmemmap_pgt[512];
11115 extern pmd_t level2_kernel_pgt[512];
11116 extern pmd_t level2_fixmap_pgt[512];
11117-extern pmd_t level2_ident_pgt[512];
11118-extern pgd_t init_level4_pgt[];
11119+extern pmd_t level2_ident_pgt[512*2];
11120+extern pgd_t init_level4_pgt[512];
11121
11122 #define swapper_pg_dir init_level4_pgt
11123
11124@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11125
11126 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11127 {
11128+ pax_open_kernel();
11129 *pmdp = pmd;
11130+ pax_close_kernel();
11131 }
11132
11133 static inline void native_pmd_clear(pmd_t *pmd)
11134@@ -107,6 +113,13 @@ static inline void native_pud_clear(pud_t *pud)
11135
11136 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11137 {
11138+ pax_open_kernel();
11139+ *pgdp = pgd;
11140+ pax_close_kernel();
11141+}
11142+
11143+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11144+{
11145 *pgdp = pgd;
11146 }
11147
11148diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11149index 766ea16..5b96cb3 100644
11150--- a/arch/x86/include/asm/pgtable_64_types.h
11151+++ b/arch/x86/include/asm/pgtable_64_types.h
11152@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11153 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11154 #define MODULES_END _AC(0xffffffffff000000, UL)
11155 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11156+#define MODULES_EXEC_VADDR MODULES_VADDR
11157+#define MODULES_EXEC_END MODULES_END
11158+
11159+#define ktla_ktva(addr) (addr)
11160+#define ktva_ktla(addr) (addr)
11161
11162 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11163diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11164index 013286a..8b42f4f 100644
11165--- a/arch/x86/include/asm/pgtable_types.h
11166+++ b/arch/x86/include/asm/pgtable_types.h
11167@@ -16,13 +16,12 @@
11168 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11169 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11170 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11171-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11172+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11173 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11174 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11175 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11176-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11177-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11178-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11179+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11180+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11181 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11182
11183 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11184@@ -40,7 +39,6 @@
11185 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11186 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11187 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11188-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11189 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11190 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11191 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11192@@ -57,8 +55,10 @@
11193
11194 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11195 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11196-#else
11197+#elif defined(CONFIG_KMEMCHECK)
11198 #define _PAGE_NX (_AT(pteval_t, 0))
11199+#else
11200+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11201 #endif
11202
11203 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11204@@ -96,6 +96,9 @@
11205 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11206 _PAGE_ACCESSED)
11207
11208+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11209+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11210+
11211 #define __PAGE_KERNEL_EXEC \
11212 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11213 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11214@@ -106,7 +109,7 @@
11215 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11216 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11217 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11218-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11219+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11220 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11221 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11222 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11223@@ -168,8 +171,8 @@
11224 * bits are combined, this will alow user to access the high address mapped
11225 * VDSO in the presence of CONFIG_COMPAT_VDSO
11226 */
11227-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11228-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11229+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11230+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11231 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11232 #endif
11233
11234@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11235 {
11236 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11237 }
11238+#endif
11239
11240+#if PAGETABLE_LEVELS == 3
11241+#include <asm-generic/pgtable-nopud.h>
11242+#endif
11243+
11244+#if PAGETABLE_LEVELS == 2
11245+#include <asm-generic/pgtable-nopmd.h>
11246+#endif
11247+
11248+#ifndef __ASSEMBLY__
11249 #if PAGETABLE_LEVELS > 3
11250 typedef struct { pudval_t pud; } pud_t;
11251
11252@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11253 return pud.pud;
11254 }
11255 #else
11256-#include <asm-generic/pgtable-nopud.h>
11257-
11258 static inline pudval_t native_pud_val(pud_t pud)
11259 {
11260 return native_pgd_val(pud.pgd);
11261@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11262 return pmd.pmd;
11263 }
11264 #else
11265-#include <asm-generic/pgtable-nopmd.h>
11266-
11267 static inline pmdval_t native_pmd_val(pmd_t pmd)
11268 {
11269 return native_pgd_val(pmd.pud.pgd);
11270@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11271
11272 extern pteval_t __supported_pte_mask;
11273 extern void set_nx(void);
11274-extern int nx_enabled;
11275
11276 #define pgprot_writecombine pgprot_writecombine
11277 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11278diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11279index 58545c9..fe6fc38e 100644
11280--- a/arch/x86/include/asm/processor.h
11281+++ b/arch/x86/include/asm/processor.h
11282@@ -266,7 +266,7 @@ struct tss_struct {
11283
11284 } ____cacheline_aligned;
11285
11286-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11287+extern struct tss_struct init_tss[NR_CPUS];
11288
11289 /*
11290 * Save the original ist values for checking stack pointers during debugging
11291@@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
11292 */
11293 #define TASK_SIZE PAGE_OFFSET
11294 #define TASK_SIZE_MAX TASK_SIZE
11295+
11296+#ifdef CONFIG_PAX_SEGMEXEC
11297+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11298+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11299+#else
11300 #define STACK_TOP TASK_SIZE
11301-#define STACK_TOP_MAX STACK_TOP
11302+#endif
11303+
11304+#define STACK_TOP_MAX TASK_SIZE
11305
11306 #define INIT_THREAD { \
11307- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11308+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11309 .vm86_info = NULL, \
11310 .sysenter_cs = __KERNEL_CS, \
11311 .io_bitmap_ptr = NULL, \
11312@@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
11313 */
11314 #define INIT_TSS { \
11315 .x86_tss = { \
11316- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11317+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11318 .ss0 = __KERNEL_DS, \
11319 .ss1 = __KERNEL_CS, \
11320 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11321@@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
11322 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11323
11324 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11325-#define KSTK_TOP(info) \
11326-({ \
11327- unsigned long *__ptr = (unsigned long *)(info); \
11328- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11329-})
11330+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11331
11332 /*
11333 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11334@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11335 #define task_pt_regs(task) \
11336 ({ \
11337 struct pt_regs *__regs__; \
11338- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11339+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11340 __regs__ - 1; \
11341 })
11342
11343@@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11344 /*
11345 * User space process size. 47bits minus one guard page.
11346 */
11347-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11348+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11349
11350 /* This decides where the kernel will search for a free chunk of vm
11351 * space during mmap's.
11352 */
11353 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11354- 0xc0000000 : 0xFFFFe000)
11355+ 0xc0000000 : 0xFFFFf000)
11356
11357 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11358 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11359@@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11360 #define STACK_TOP_MAX TASK_SIZE_MAX
11361
11362 #define INIT_THREAD { \
11363- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11364+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11365 }
11366
11367 #define INIT_TSS { \
11368- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11369+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11370 }
11371
11372 /*
11373@@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11374 */
11375 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11376
11377+#ifdef CONFIG_PAX_SEGMEXEC
11378+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11379+#endif
11380+
11381 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11382
11383 /* Get/set a process' ability to use the timestamp counter instruction */
11384diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11385index 3566454..4bdfb8c 100644
11386--- a/arch/x86/include/asm/ptrace.h
11387+++ b/arch/x86/include/asm/ptrace.h
11388@@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11389 }
11390
11391 /*
11392- * user_mode_vm(regs) determines whether a register set came from user mode.
11393+ * user_mode(regs) determines whether a register set came from user mode.
11394 * This is true if V8086 mode was enabled OR if the register set was from
11395 * protected mode with RPL-3 CS value. This tricky test checks that with
11396 * one comparison. Many places in the kernel can bypass this full check
11397- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11398+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11399+ * be used.
11400 */
11401-static inline int user_mode(struct pt_regs *regs)
11402+static inline int user_mode_novm(struct pt_regs *regs)
11403 {
11404 #ifdef CONFIG_X86_32
11405 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11406 #else
11407- return !!(regs->cs & 3);
11408+ return !!(regs->cs & SEGMENT_RPL_MASK);
11409 #endif
11410 }
11411
11412-static inline int user_mode_vm(struct pt_regs *regs)
11413+static inline int user_mode(struct pt_regs *regs)
11414 {
11415 #ifdef CONFIG_X86_32
11416 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11417 USER_RPL;
11418 #else
11419- return user_mode(regs);
11420+ return user_mode_novm(regs);
11421 #endif
11422 }
11423
11424@@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11425 #ifdef CONFIG_X86_64
11426 static inline bool user_64bit_mode(struct pt_regs *regs)
11427 {
11428+ unsigned long cs = regs->cs & 0xffff;
11429 #ifndef CONFIG_PARAVIRT
11430 /*
11431 * On non-paravirt systems, this is the only long mode CPL 3
11432 * selector. We do not allow long mode selectors in the LDT.
11433 */
11434- return regs->cs == __USER_CS;
11435+ return cs == __USER_CS;
11436 #else
11437 /* Headers are too twisted for this to go in paravirt.h. */
11438- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11439+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11440 #endif
11441 }
11442 #endif
11443diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11444index 92f29706..a79cbbb 100644
11445--- a/arch/x86/include/asm/reboot.h
11446+++ b/arch/x86/include/asm/reboot.h
11447@@ -6,19 +6,19 @@
11448 struct pt_regs;
11449
11450 struct machine_ops {
11451- void (*restart)(char *cmd);
11452- void (*halt)(void);
11453- void (*power_off)(void);
11454+ void (* __noreturn restart)(char *cmd);
11455+ void (* __noreturn halt)(void);
11456+ void (* __noreturn power_off)(void);
11457 void (*shutdown)(void);
11458 void (*crash_shutdown)(struct pt_regs *);
11459- void (*emergency_restart)(void);
11460-};
11461+ void (* __noreturn emergency_restart)(void);
11462+} __no_const;
11463
11464 extern struct machine_ops machine_ops;
11465
11466 void native_machine_crash_shutdown(struct pt_regs *regs);
11467 void native_machine_shutdown(void);
11468-void machine_real_restart(unsigned int type);
11469+void machine_real_restart(unsigned int type) __noreturn;
11470 /* These must match dispatch_table in reboot_32.S */
11471 #define MRR_BIOS 0
11472 #define MRR_APM 1
11473diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11474index 2dbe4a7..ce1db00 100644
11475--- a/arch/x86/include/asm/rwsem.h
11476+++ b/arch/x86/include/asm/rwsem.h
11477@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11478 {
11479 asm volatile("# beginning down_read\n\t"
11480 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11481+
11482+#ifdef CONFIG_PAX_REFCOUNT
11483+ "jno 0f\n"
11484+ LOCK_PREFIX _ASM_DEC "(%1)\n"
11485+ "int $4\n0:\n"
11486+ _ASM_EXTABLE(0b, 0b)
11487+#endif
11488+
11489 /* adds 0x00000001 */
11490 " jns 1f\n"
11491 " call call_rwsem_down_read_failed\n"
11492@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11493 "1:\n\t"
11494 " mov %1,%2\n\t"
11495 " add %3,%2\n\t"
11496+
11497+#ifdef CONFIG_PAX_REFCOUNT
11498+ "jno 0f\n"
11499+ "sub %3,%2\n"
11500+ "int $4\n0:\n"
11501+ _ASM_EXTABLE(0b, 0b)
11502+#endif
11503+
11504 " jle 2f\n\t"
11505 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11506 " jnz 1b\n\t"
11507@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11508 long tmp;
11509 asm volatile("# beginning down_write\n\t"
11510 LOCK_PREFIX " xadd %1,(%2)\n\t"
11511+
11512+#ifdef CONFIG_PAX_REFCOUNT
11513+ "jno 0f\n"
11514+ "mov %1,(%2)\n"
11515+ "int $4\n0:\n"
11516+ _ASM_EXTABLE(0b, 0b)
11517+#endif
11518+
11519 /* adds 0xffff0001, returns the old value */
11520 " test %1,%1\n\t"
11521 /* was the count 0 before? */
11522@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11523 long tmp;
11524 asm volatile("# beginning __up_read\n\t"
11525 LOCK_PREFIX " xadd %1,(%2)\n\t"
11526+
11527+#ifdef CONFIG_PAX_REFCOUNT
11528+ "jno 0f\n"
11529+ "mov %1,(%2)\n"
11530+ "int $4\n0:\n"
11531+ _ASM_EXTABLE(0b, 0b)
11532+#endif
11533+
11534 /* subtracts 1, returns the old value */
11535 " jns 1f\n\t"
11536 " call call_rwsem_wake\n" /* expects old value in %edx */
11537@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11538 long tmp;
11539 asm volatile("# beginning __up_write\n\t"
11540 LOCK_PREFIX " xadd %1,(%2)\n\t"
11541+
11542+#ifdef CONFIG_PAX_REFCOUNT
11543+ "jno 0f\n"
11544+ "mov %1,(%2)\n"
11545+ "int $4\n0:\n"
11546+ _ASM_EXTABLE(0b, 0b)
11547+#endif
11548+
11549 /* subtracts 0xffff0001, returns the old value */
11550 " jns 1f\n\t"
11551 " call call_rwsem_wake\n" /* expects old value in %edx */
11552@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11553 {
11554 asm volatile("# beginning __downgrade_write\n\t"
11555 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11556+
11557+#ifdef CONFIG_PAX_REFCOUNT
11558+ "jno 0f\n"
11559+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11560+ "int $4\n0:\n"
11561+ _ASM_EXTABLE(0b, 0b)
11562+#endif
11563+
11564 /*
11565 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11566 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11567@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11568 */
11569 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11570 {
11571- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11572+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11573+
11574+#ifdef CONFIG_PAX_REFCOUNT
11575+ "jno 0f\n"
11576+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
11577+ "int $4\n0:\n"
11578+ _ASM_EXTABLE(0b, 0b)
11579+#endif
11580+
11581 : "+m" (sem->count)
11582 : "er" (delta));
11583 }
11584@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11585 */
11586 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
11587 {
11588- return delta + xadd(&sem->count, delta);
11589+ return delta + xadd_check_overflow(&sem->count, delta);
11590 }
11591
11592 #endif /* __KERNEL__ */
11593diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11594index 5e64171..f58957e 100644
11595--- a/arch/x86/include/asm/segment.h
11596+++ b/arch/x86/include/asm/segment.h
11597@@ -64,10 +64,15 @@
11598 * 26 - ESPFIX small SS
11599 * 27 - per-cpu [ offset to per-cpu data area ]
11600 * 28 - stack_canary-20 [ for stack protector ]
11601- * 29 - unused
11602- * 30 - unused
11603+ * 29 - PCI BIOS CS
11604+ * 30 - PCI BIOS DS
11605 * 31 - TSS for double fault handler
11606 */
11607+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11608+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11609+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11610+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11611+
11612 #define GDT_ENTRY_TLS_MIN 6
11613 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11614
11615@@ -79,6 +84,8 @@
11616
11617 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
11618
11619+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11620+
11621 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
11622
11623 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
11624@@ -104,6 +111,12 @@
11625 #define __KERNEL_STACK_CANARY 0
11626 #endif
11627
11628+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
11629+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11630+
11631+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
11632+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11633+
11634 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11635
11636 /*
11637@@ -141,7 +154,7 @@
11638 */
11639
11640 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11641-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11642+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11643
11644
11645 #else
11646@@ -165,6 +178,8 @@
11647 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
11648 #define __USER32_DS __USER_DS
11649
11650+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11651+
11652 #define GDT_ENTRY_TSS 8 /* needs two entries */
11653 #define GDT_ENTRY_LDT 10 /* needs two entries */
11654 #define GDT_ENTRY_TLS_MIN 12
11655@@ -185,6 +200,7 @@
11656 #endif
11657
11658 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
11659+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
11660 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
11661 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
11662 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
11663diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11664index 0434c40..1714bf0 100644
11665--- a/arch/x86/include/asm/smp.h
11666+++ b/arch/x86/include/asm/smp.h
11667@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11668 /* cpus sharing the last level cache: */
11669 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
11670 DECLARE_PER_CPU(u16, cpu_llc_id);
11671-DECLARE_PER_CPU(int, cpu_number);
11672+DECLARE_PER_CPU(unsigned int, cpu_number);
11673
11674 static inline struct cpumask *cpu_sibling_mask(int cpu)
11675 {
11676@@ -77,7 +77,7 @@ struct smp_ops {
11677
11678 void (*send_call_func_ipi)(const struct cpumask *mask);
11679 void (*send_call_func_single_ipi)(int cpu);
11680-};
11681+} __no_const;
11682
11683 /* Globals due to paravirt */
11684 extern void set_cpu_sibling_map(int cpu);
11685@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11686 extern int safe_smp_processor_id(void);
11687
11688 #elif defined(CONFIG_X86_64_SMP)
11689-#define raw_smp_processor_id() (percpu_read(cpu_number))
11690-
11691-#define stack_smp_processor_id() \
11692-({ \
11693- struct thread_info *ti; \
11694- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11695- ti->cpu; \
11696-})
11697+#define raw_smp_processor_id() (percpu_read(cpu_number))
11698+#define stack_smp_processor_id() raw_smp_processor_id()
11699 #define safe_smp_processor_id() smp_processor_id()
11700
11701 #endif
11702diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11703index a82c2bf..2198f61 100644
11704--- a/arch/x86/include/asm/spinlock.h
11705+++ b/arch/x86/include/asm/spinlock.h
11706@@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
11707 static inline void arch_read_lock(arch_rwlock_t *rw)
11708 {
11709 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
11710+
11711+#ifdef CONFIG_PAX_REFCOUNT
11712+ "jno 0f\n"
11713+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
11714+ "int $4\n0:\n"
11715+ _ASM_EXTABLE(0b, 0b)
11716+#endif
11717+
11718 "jns 1f\n"
11719 "call __read_lock_failed\n\t"
11720 "1:\n"
11721@@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
11722 static inline void arch_write_lock(arch_rwlock_t *rw)
11723 {
11724 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
11725+
11726+#ifdef CONFIG_PAX_REFCOUNT
11727+ "jno 0f\n"
11728+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
11729+ "int $4\n0:\n"
11730+ _ASM_EXTABLE(0b, 0b)
11731+#endif
11732+
11733 "jz 1f\n"
11734 "call __write_lock_failed\n\t"
11735 "1:\n"
11736@@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
11737
11738 static inline void arch_read_unlock(arch_rwlock_t *rw)
11739 {
11740- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
11741+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
11742+
11743+#ifdef CONFIG_PAX_REFCOUNT
11744+ "jno 0f\n"
11745+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
11746+ "int $4\n0:\n"
11747+ _ASM_EXTABLE(0b, 0b)
11748+#endif
11749+
11750 :"+m" (rw->lock) : : "memory");
11751 }
11752
11753 static inline void arch_write_unlock(arch_rwlock_t *rw)
11754 {
11755- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
11756+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
11757+
11758+#ifdef CONFIG_PAX_REFCOUNT
11759+ "jno 0f\n"
11760+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
11761+ "int $4\n0:\n"
11762+ _ASM_EXTABLE(0b, 0b)
11763+#endif
11764+
11765 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
11766 }
11767
11768diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11769index 1575177..cb23f52 100644
11770--- a/arch/x86/include/asm/stackprotector.h
11771+++ b/arch/x86/include/asm/stackprotector.h
11772@@ -48,7 +48,7 @@
11773 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11774 */
11775 #define GDT_STACK_CANARY_INIT \
11776- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11777+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11778
11779 /*
11780 * Initialize the stackprotector canary value.
11781@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11782
11783 static inline void load_stack_canary_segment(void)
11784 {
11785-#ifdef CONFIG_X86_32
11786+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11787 asm volatile ("mov %0, %%gs" : : "r" (0));
11788 #endif
11789 }
11790diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
11791index 70bbe39..4ae2bd4 100644
11792--- a/arch/x86/include/asm/stacktrace.h
11793+++ b/arch/x86/include/asm/stacktrace.h
11794@@ -11,28 +11,20 @@
11795
11796 extern int kstack_depth_to_print;
11797
11798-struct thread_info;
11799+struct task_struct;
11800 struct stacktrace_ops;
11801
11802-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
11803- unsigned long *stack,
11804- unsigned long bp,
11805- const struct stacktrace_ops *ops,
11806- void *data,
11807- unsigned long *end,
11808- int *graph);
11809+typedef unsigned long walk_stack_t(struct task_struct *task,
11810+ void *stack_start,
11811+ unsigned long *stack,
11812+ unsigned long bp,
11813+ const struct stacktrace_ops *ops,
11814+ void *data,
11815+ unsigned long *end,
11816+ int *graph);
11817
11818-extern unsigned long
11819-print_context_stack(struct thread_info *tinfo,
11820- unsigned long *stack, unsigned long bp,
11821- const struct stacktrace_ops *ops, void *data,
11822- unsigned long *end, int *graph);
11823-
11824-extern unsigned long
11825-print_context_stack_bp(struct thread_info *tinfo,
11826- unsigned long *stack, unsigned long bp,
11827- const struct stacktrace_ops *ops, void *data,
11828- unsigned long *end, int *graph);
11829+extern walk_stack_t print_context_stack;
11830+extern walk_stack_t print_context_stack_bp;
11831
11832 /* Generic stack tracer with callbacks */
11833
11834@@ -40,7 +32,7 @@ struct stacktrace_ops {
11835 void (*address)(void *data, unsigned long address, int reliable);
11836 /* On negative return stop dumping */
11837 int (*stack)(void *data, char *name);
11838- walk_stack_t walk_stack;
11839+ walk_stack_t *walk_stack;
11840 };
11841
11842 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
11843diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
11844index cb23852..2dde194 100644
11845--- a/arch/x86/include/asm/sys_ia32.h
11846+++ b/arch/x86/include/asm/sys_ia32.h
11847@@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
11848 compat_sigset_t __user *, unsigned int);
11849 asmlinkage long sys32_alarm(unsigned int);
11850
11851-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
11852+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
11853 asmlinkage long sys32_sysfs(int, u32, u32);
11854
11855 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
11856diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
11857index f1d8b44..a4de8b7 100644
11858--- a/arch/x86/include/asm/syscalls.h
11859+++ b/arch/x86/include/asm/syscalls.h
11860@@ -30,7 +30,7 @@ long sys_clone(unsigned long, unsigned long, void __user *,
11861 void __user *, struct pt_regs *);
11862
11863 /* kernel/ldt.c */
11864-asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
11865+asmlinkage int sys_modify_ldt(int, void __user *, unsigned long) __size_overflow(3);
11866
11867 /* kernel/signal.c */
11868 long sys_rt_sigreturn(struct pt_regs *);
11869diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11870index 2d2f01c..f985723 100644
11871--- a/arch/x86/include/asm/system.h
11872+++ b/arch/x86/include/asm/system.h
11873@@ -129,7 +129,7 @@ do { \
11874 "call __switch_to\n\t" \
11875 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11876 __switch_canary \
11877- "movq %P[thread_info](%%rsi),%%r8\n\t" \
11878+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11879 "movq %%rax,%%rdi\n\t" \
11880 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11881 "jnz ret_from_fork\n\t" \
11882@@ -140,7 +140,7 @@ do { \
11883 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11884 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11885 [_tif_fork] "i" (_TIF_FORK), \
11886- [thread_info] "i" (offsetof(struct task_struct, stack)), \
11887+ [thread_info] "m" (current_tinfo), \
11888 [current_task] "m" (current_task) \
11889 __switch_canary_iparam \
11890 : "memory", "cc" __EXTRA_CLOBBER)
11891@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11892 {
11893 unsigned long __limit;
11894 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11895- return __limit + 1;
11896+ return __limit;
11897 }
11898
11899 static inline void native_clts(void)
11900@@ -397,13 +397,13 @@ void enable_hlt(void);
11901
11902 void cpu_idle_wait(void);
11903
11904-extern unsigned long arch_align_stack(unsigned long sp);
11905+#define arch_align_stack(x) ((x) & ~0xfUL)
11906 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11907
11908 void default_idle(void);
11909 bool set_pm_idle_to_default(void);
11910
11911-void stop_this_cpu(void *dummy);
11912+void stop_this_cpu(void *dummy) __noreturn;
11913
11914 /*
11915 * Force strict CPU ordering.
11916diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11917index cfd8144..1b1127d 100644
11918--- a/arch/x86/include/asm/thread_info.h
11919+++ b/arch/x86/include/asm/thread_info.h
11920@@ -10,6 +10,7 @@
11921 #include <linux/compiler.h>
11922 #include <asm/page.h>
11923 #include <asm/types.h>
11924+#include <asm/percpu.h>
11925
11926 /*
11927 * low level task data that entry.S needs immediate access to
11928@@ -24,7 +25,6 @@ struct exec_domain;
11929 #include <linux/atomic.h>
11930
11931 struct thread_info {
11932- struct task_struct *task; /* main task structure */
11933 struct exec_domain *exec_domain; /* execution domain */
11934 __u32 flags; /* low level flags */
11935 __u32 status; /* thread synchronous flags */
11936@@ -34,19 +34,13 @@ struct thread_info {
11937 mm_segment_t addr_limit;
11938 struct restart_block restart_block;
11939 void __user *sysenter_return;
11940-#ifdef CONFIG_X86_32
11941- unsigned long previous_esp; /* ESP of the previous stack in
11942- case of nested (IRQ) stacks
11943- */
11944- __u8 supervisor_stack[0];
11945-#endif
11946+ unsigned long lowest_stack;
11947 unsigned int sig_on_uaccess_error:1;
11948 unsigned int uaccess_err:1; /* uaccess failed */
11949 };
11950
11951-#define INIT_THREAD_INFO(tsk) \
11952+#define INIT_THREAD_INFO \
11953 { \
11954- .task = &tsk, \
11955 .exec_domain = &default_exec_domain, \
11956 .flags = 0, \
11957 .cpu = 0, \
11958@@ -57,7 +51,7 @@ struct thread_info {
11959 }, \
11960 }
11961
11962-#define init_thread_info (init_thread_union.thread_info)
11963+#define init_thread_info (init_thread_union.stack)
11964 #define init_stack (init_thread_union.stack)
11965
11966 #else /* !__ASSEMBLY__ */
11967@@ -169,45 +163,40 @@ struct thread_info {
11968 ret; \
11969 })
11970
11971-#ifdef CONFIG_X86_32
11972-
11973-#define STACK_WARN (THREAD_SIZE/8)
11974-/*
11975- * macros/functions for gaining access to the thread information structure
11976- *
11977- * preempt_count needs to be 1 initially, until the scheduler is functional.
11978- */
11979-#ifndef __ASSEMBLY__
11980-
11981-
11982-/* how to get the current stack pointer from C */
11983-register unsigned long current_stack_pointer asm("esp") __used;
11984-
11985-/* how to get the thread information struct from C */
11986-static inline struct thread_info *current_thread_info(void)
11987-{
11988- return (struct thread_info *)
11989- (current_stack_pointer & ~(THREAD_SIZE - 1));
11990-}
11991-
11992-#else /* !__ASSEMBLY__ */
11993-
11994+#ifdef __ASSEMBLY__
11995 /* how to get the thread information struct from ASM */
11996 #define GET_THREAD_INFO(reg) \
11997- movl $-THREAD_SIZE, reg; \
11998- andl %esp, reg
11999+ mov PER_CPU_VAR(current_tinfo), reg
12000
12001 /* use this one if reg already contains %esp */
12002-#define GET_THREAD_INFO_WITH_ESP(reg) \
12003- andl $-THREAD_SIZE, reg
12004+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12005+#else
12006+/* how to get the thread information struct from C */
12007+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12008+
12009+static __always_inline struct thread_info *current_thread_info(void)
12010+{
12011+ return percpu_read_stable(current_tinfo);
12012+}
12013+#endif
12014+
12015+#ifdef CONFIG_X86_32
12016+
12017+#define STACK_WARN (THREAD_SIZE/8)
12018+/*
12019+ * macros/functions for gaining access to the thread information structure
12020+ *
12021+ * preempt_count needs to be 1 initially, until the scheduler is functional.
12022+ */
12023+#ifndef __ASSEMBLY__
12024+
12025+/* how to get the current stack pointer from C */
12026+register unsigned long current_stack_pointer asm("esp") __used;
12027
12028 #endif
12029
12030 #else /* X86_32 */
12031
12032-#include <asm/percpu.h>
12033-#define KERNEL_STACK_OFFSET (5*8)
12034-
12035 /*
12036 * macros/functions for gaining access to the thread information structure
12037 * preempt_count needs to be 1 initially, until the scheduler is functional.
12038@@ -215,27 +204,8 @@ static inline struct thread_info *current_thread_info(void)
12039 #ifndef __ASSEMBLY__
12040 DECLARE_PER_CPU(unsigned long, kernel_stack);
12041
12042-static inline struct thread_info *current_thread_info(void)
12043-{
12044- struct thread_info *ti;
12045- ti = (void *)(percpu_read_stable(kernel_stack) +
12046- KERNEL_STACK_OFFSET - THREAD_SIZE);
12047- return ti;
12048-}
12049-
12050-#else /* !__ASSEMBLY__ */
12051-
12052-/* how to get the thread information struct from ASM */
12053-#define GET_THREAD_INFO(reg) \
12054- movq PER_CPU_VAR(kernel_stack),reg ; \
12055- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12056-
12057-/*
12058- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12059- * a certain register (to be used in assembler memory operands).
12060- */
12061-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12062-
12063+/* how to get the current stack pointer from C */
12064+register unsigned long current_stack_pointer asm("rsp") __used;
12065 #endif
12066
12067 #endif /* !X86_32 */
12068@@ -269,5 +239,16 @@ extern void arch_task_cache_init(void);
12069 extern void free_thread_info(struct thread_info *ti);
12070 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12071 #define arch_task_cache_init arch_task_cache_init
12072+
12073+#define __HAVE_THREAD_FUNCTIONS
12074+#define task_thread_info(task) (&(task)->tinfo)
12075+#define task_stack_page(task) ((task)->stack)
12076+#define setup_thread_stack(p, org) do {} while (0)
12077+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12078+
12079+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12080+extern struct task_struct *alloc_task_struct_node(int node);
12081+extern void free_task_struct(struct task_struct *);
12082+
12083 #endif
12084 #endif /* _ASM_X86_THREAD_INFO_H */
12085diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12086index 8be5f54..7ae826d 100644
12087--- a/arch/x86/include/asm/uaccess.h
12088+++ b/arch/x86/include/asm/uaccess.h
12089@@ -7,12 +7,15 @@
12090 #include <linux/compiler.h>
12091 #include <linux/thread_info.h>
12092 #include <linux/string.h>
12093+#include <linux/sched.h>
12094 #include <asm/asm.h>
12095 #include <asm/page.h>
12096
12097 #define VERIFY_READ 0
12098 #define VERIFY_WRITE 1
12099
12100+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12101+
12102 /*
12103 * The fs value determines whether argument validity checking should be
12104 * performed or not. If get_fs() == USER_DS, checking is performed, with
12105@@ -28,7 +31,12 @@
12106
12107 #define get_ds() (KERNEL_DS)
12108 #define get_fs() (current_thread_info()->addr_limit)
12109+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12110+void __set_fs(mm_segment_t x);
12111+void set_fs(mm_segment_t x);
12112+#else
12113 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12114+#endif
12115
12116 #define segment_eq(a, b) ((a).seg == (b).seg)
12117
12118@@ -76,7 +84,33 @@
12119 * checks that the pointer is in the user space range - after calling
12120 * this function, memory access functions may still return -EFAULT.
12121 */
12122-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12123+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12124+#define access_ok(type, addr, size) \
12125+({ \
12126+ long __size = size; \
12127+ unsigned long __addr = (unsigned long)addr; \
12128+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12129+ unsigned long __end_ao = __addr + __size - 1; \
12130+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12131+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12132+ while(__addr_ao <= __end_ao) { \
12133+ char __c_ao; \
12134+ __addr_ao += PAGE_SIZE; \
12135+ if (__size > PAGE_SIZE) \
12136+ cond_resched(); \
12137+ if (__get_user(__c_ao, (char __user *)__addr)) \
12138+ break; \
12139+ if (type != VERIFY_WRITE) { \
12140+ __addr = __addr_ao; \
12141+ continue; \
12142+ } \
12143+ if (__put_user(__c_ao, (char __user *)__addr)) \
12144+ break; \
12145+ __addr = __addr_ao; \
12146+ } \
12147+ } \
12148+ __ret_ao; \
12149+})
12150
12151 /*
12152 * The exception table consists of pairs of addresses: the first is the
12153@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12154 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12155 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12156
12157-
12158+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12159+#define __copyuser_seg "gs;"
12160+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12161+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12162+#else
12163+#define __copyuser_seg
12164+#define __COPYUSER_SET_ES
12165+#define __COPYUSER_RESTORE_ES
12166+#endif
12167
12168 #ifdef CONFIG_X86_32
12169 #define __put_user_asm_u64(x, addr, err, errret) \
12170- asm volatile("1: movl %%eax,0(%2)\n" \
12171- "2: movl %%edx,4(%2)\n" \
12172+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12173+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12174 "3:\n" \
12175 ".section .fixup,\"ax\"\n" \
12176 "4: movl %3,%0\n" \
12177@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12178 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12179
12180 #define __put_user_asm_ex_u64(x, addr) \
12181- asm volatile("1: movl %%eax,0(%1)\n" \
12182- "2: movl %%edx,4(%1)\n" \
12183+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12184+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12185 "3:\n" \
12186 _ASM_EXTABLE(1b, 2b - 1b) \
12187 _ASM_EXTABLE(2b, 3b - 2b) \
12188@@ -252,7 +294,7 @@ extern void __put_user_8(void);
12189 __typeof__(*(ptr)) __pu_val; \
12190 __chk_user_ptr(ptr); \
12191 might_fault(); \
12192- __pu_val = x; \
12193+ __pu_val = (x); \
12194 switch (sizeof(*(ptr))) { \
12195 case 1: \
12196 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12197@@ -373,7 +415,7 @@ do { \
12198 } while (0)
12199
12200 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12201- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12202+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12203 "2:\n" \
12204 ".section .fixup,\"ax\"\n" \
12205 "3: mov %3,%0\n" \
12206@@ -381,7 +423,7 @@ do { \
12207 " jmp 2b\n" \
12208 ".previous\n" \
12209 _ASM_EXTABLE(1b, 3b) \
12210- : "=r" (err), ltype(x) \
12211+ : "=r" (err), ltype (x) \
12212 : "m" (__m(addr)), "i" (errret), "0" (err))
12213
12214 #define __get_user_size_ex(x, ptr, size) \
12215@@ -406,7 +448,7 @@ do { \
12216 } while (0)
12217
12218 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12219- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12220+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12221 "2:\n" \
12222 _ASM_EXTABLE(1b, 2b - 1b) \
12223 : ltype(x) : "m" (__m(addr)))
12224@@ -423,13 +465,24 @@ do { \
12225 int __gu_err; \
12226 unsigned long __gu_val; \
12227 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12228- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12229+ (x) = (__typeof__(*(ptr)))__gu_val; \
12230 __gu_err; \
12231 })
12232
12233 /* FIXME: this hack is definitely wrong -AK */
12234 struct __large_struct { unsigned long buf[100]; };
12235-#define __m(x) (*(struct __large_struct __user *)(x))
12236+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12237+#define ____m(x) \
12238+({ \
12239+ unsigned long ____x = (unsigned long)(x); \
12240+ if (____x < PAX_USER_SHADOW_BASE) \
12241+ ____x += PAX_USER_SHADOW_BASE; \
12242+ (void __user *)____x; \
12243+})
12244+#else
12245+#define ____m(x) (x)
12246+#endif
12247+#define __m(x) (*(struct __large_struct __user *)____m(x))
12248
12249 /*
12250 * Tell gcc we read from memory instead of writing: this is because
12251@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12252 * aliasing issues.
12253 */
12254 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12255- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12256+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12257 "2:\n" \
12258 ".section .fixup,\"ax\"\n" \
12259 "3: mov %3,%0\n" \
12260@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12261 ".previous\n" \
12262 _ASM_EXTABLE(1b, 3b) \
12263 : "=r"(err) \
12264- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12265+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12266
12267 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12268- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12269+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12270 "2:\n" \
12271 _ASM_EXTABLE(1b, 2b - 1b) \
12272 : : ltype(x), "m" (__m(addr)))
12273@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12274 * On error, the variable @x is set to zero.
12275 */
12276
12277+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12278+#define __get_user(x, ptr) get_user((x), (ptr))
12279+#else
12280 #define __get_user(x, ptr) \
12281 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12282+#endif
12283
12284 /**
12285 * __put_user: - Write a simple value into user space, with less checking.
12286@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12287 * Returns zero on success, or -EFAULT on error.
12288 */
12289
12290+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12291+#define __put_user(x, ptr) put_user((x), (ptr))
12292+#else
12293 #define __put_user(x, ptr) \
12294 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12295+#endif
12296
12297 #define __get_user_unaligned __get_user
12298 #define __put_user_unaligned __put_user
12299@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12300 #define get_user_ex(x, ptr) do { \
12301 unsigned long __gue_val; \
12302 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12303- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12304+ (x) = (__typeof__(*(ptr)))__gue_val; \
12305 } while (0)
12306
12307 #ifdef CONFIG_X86_WP_WORKS_OK
12308diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12309index 566e803..7183d0b 100644
12310--- a/arch/x86/include/asm/uaccess_32.h
12311+++ b/arch/x86/include/asm/uaccess_32.h
12312@@ -11,15 +11,15 @@
12313 #include <asm/page.h>
12314
12315 unsigned long __must_check __copy_to_user_ll
12316- (void __user *to, const void *from, unsigned long n);
12317+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12318 unsigned long __must_check __copy_from_user_ll
12319- (void *to, const void __user *from, unsigned long n);
12320+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12321 unsigned long __must_check __copy_from_user_ll_nozero
12322- (void *to, const void __user *from, unsigned long n);
12323+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12324 unsigned long __must_check __copy_from_user_ll_nocache
12325- (void *to, const void __user *from, unsigned long n);
12326+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12327 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12328- (void *to, const void __user *from, unsigned long n);
12329+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12330
12331 /**
12332 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12333@@ -41,8 +41,13 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12334 */
12335
12336 static __always_inline unsigned long __must_check
12337+__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) __size_overflow(3);
12338+static __always_inline unsigned long __must_check
12339 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12340 {
12341+ if ((long)n < 0)
12342+ return n;
12343+
12344 if (__builtin_constant_p(n)) {
12345 unsigned long ret;
12346
12347@@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12348 return ret;
12349 }
12350 }
12351+ if (!__builtin_constant_p(n))
12352+ check_object_size(from, n, true);
12353 return __copy_to_user_ll(to, from, n);
12354 }
12355
12356@@ -79,15 +86,23 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12357 * On success, this will be zero.
12358 */
12359 static __always_inline unsigned long __must_check
12360+__copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
12361+static __always_inline unsigned long __must_check
12362 __copy_to_user(void __user *to, const void *from, unsigned long n)
12363 {
12364 might_fault();
12365+
12366 return __copy_to_user_inatomic(to, from, n);
12367 }
12368
12369 static __always_inline unsigned long
12370+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) __size_overflow(3);
12371+static __always_inline unsigned long
12372 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12373 {
12374+ if ((long)n < 0)
12375+ return n;
12376+
12377 /* Avoid zeroing the tail if the copy fails..
12378 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12379 * but as the zeroing behaviour is only significant when n is not
12380@@ -134,9 +149,15 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12381 * for explanation of why this is needed.
12382 */
12383 static __always_inline unsigned long
12384+__copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
12385+static __always_inline unsigned long
12386 __copy_from_user(void *to, const void __user *from, unsigned long n)
12387 {
12388 might_fault();
12389+
12390+ if ((long)n < 0)
12391+ return n;
12392+
12393 if (__builtin_constant_p(n)) {
12394 unsigned long ret;
12395
12396@@ -152,13 +173,21 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12397 return ret;
12398 }
12399 }
12400+ if (!__builtin_constant_p(n))
12401+ check_object_size(to, n, false);
12402 return __copy_from_user_ll(to, from, n);
12403 }
12404
12405 static __always_inline unsigned long __copy_from_user_nocache(void *to,
12406+ const void __user *from, unsigned long n) __size_overflow(3);
12407+static __always_inline unsigned long __copy_from_user_nocache(void *to,
12408 const void __user *from, unsigned long n)
12409 {
12410 might_fault();
12411+
12412+ if ((long)n < 0)
12413+ return n;
12414+
12415 if (__builtin_constant_p(n)) {
12416 unsigned long ret;
12417
12418@@ -179,17 +208,24 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12419
12420 static __always_inline unsigned long
12421 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12422+ unsigned long n) __size_overflow(3);
12423+static __always_inline unsigned long
12424+__copy_from_user_inatomic_nocache(void *to, const void __user *from,
12425 unsigned long n)
12426 {
12427- return __copy_from_user_ll_nocache_nozero(to, from, n);
12428+ if ((long)n < 0)
12429+ return n;
12430+
12431+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12432 }
12433
12434-unsigned long __must_check copy_to_user(void __user *to,
12435- const void *from, unsigned long n);
12436-unsigned long __must_check _copy_from_user(void *to,
12437- const void __user *from,
12438- unsigned long n);
12439-
12440+extern void copy_to_user_overflow(void)
12441+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12442+ __compiletime_error("copy_to_user() buffer size is not provably correct")
12443+#else
12444+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
12445+#endif
12446+;
12447
12448 extern void copy_from_user_overflow(void)
12449 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12450@@ -199,17 +235,65 @@ extern void copy_from_user_overflow(void)
12451 #endif
12452 ;
12453
12454-static inline unsigned long __must_check copy_from_user(void *to,
12455- const void __user *from,
12456- unsigned long n)
12457+/**
12458+ * copy_to_user: - Copy a block of data into user space.
12459+ * @to: Destination address, in user space.
12460+ * @from: Source address, in kernel space.
12461+ * @n: Number of bytes to copy.
12462+ *
12463+ * Context: User context only. This function may sleep.
12464+ *
12465+ * Copy data from kernel space to user space.
12466+ *
12467+ * Returns number of bytes that could not be copied.
12468+ * On success, this will be zero.
12469+ */
12470+static inline unsigned long __must_check
12471+copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
12472+static inline unsigned long __must_check
12473+copy_to_user(void __user *to, const void *from, unsigned long n)
12474+{
12475+ int sz = __compiletime_object_size(from);
12476+
12477+ if (unlikely(sz != -1 && sz < n))
12478+ copy_to_user_overflow();
12479+ else if (access_ok(VERIFY_WRITE, to, n))
12480+ n = __copy_to_user(to, from, n);
12481+ return n;
12482+}
12483+
12484+/**
12485+ * copy_from_user: - Copy a block of data from user space.
12486+ * @to: Destination address, in kernel space.
12487+ * @from: Source address, in user space.
12488+ * @n: Number of bytes to copy.
12489+ *
12490+ * Context: User context only. This function may sleep.
12491+ *
12492+ * Copy data from user space to kernel space.
12493+ *
12494+ * Returns number of bytes that could not be copied.
12495+ * On success, this will be zero.
12496+ *
12497+ * If some data could not be copied, this function will pad the copied
12498+ * data to the requested size using zero bytes.
12499+ */
12500+static inline unsigned long __must_check
12501+copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
12502+static inline unsigned long __must_check
12503+copy_from_user(void *to, const void __user *from, unsigned long n)
12504 {
12505 int sz = __compiletime_object_size(to);
12506
12507- if (likely(sz == -1 || sz >= n))
12508- n = _copy_from_user(to, from, n);
12509- else
12510+ if (unlikely(sz != -1 && sz < n))
12511 copy_from_user_overflow();
12512-
12513+ else if (access_ok(VERIFY_READ, from, n))
12514+ n = __copy_from_user(to, from, n);
12515+ else if ((long)n > 0) {
12516+ if (!__builtin_constant_p(n))
12517+ check_object_size(to, n, false);
12518+ memset(to, 0, n);
12519+ }
12520 return n;
12521 }
12522
12523@@ -235,7 +319,7 @@ long __must_check __strncpy_from_user(char *dst,
12524 #define strlen_user(str) strnlen_user(str, LONG_MAX)
12525
12526 long strnlen_user(const char __user *str, long n);
12527-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
12528-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
12529+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
12530+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
12531
12532 #endif /* _ASM_X86_UACCESS_32_H */
12533diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12534index 1c66d30..e294b5f 100644
12535--- a/arch/x86/include/asm/uaccess_64.h
12536+++ b/arch/x86/include/asm/uaccess_64.h
12537@@ -10,6 +10,9 @@
12538 #include <asm/alternative.h>
12539 #include <asm/cpufeature.h>
12540 #include <asm/page.h>
12541+#include <asm/pgtable.h>
12542+
12543+#define set_fs(x) (current_thread_info()->addr_limit = (x))
12544
12545 /*
12546 * Copy To/From Userspace
12547@@ -17,12 +20,14 @@
12548
12549 /* Handles exceptions in both to and from, but doesn't do access_ok */
12550 __must_check unsigned long
12551-copy_user_generic_string(void *to, const void *from, unsigned len);
12552+copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
12553 __must_check unsigned long
12554-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
12555+copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
12556
12557 static __always_inline __must_check unsigned long
12558-copy_user_generic(void *to, const void *from, unsigned len)
12559+copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
12560+static __always_inline __must_check unsigned long
12561+copy_user_generic(void *to, const void *from, unsigned long len)
12562 {
12563 unsigned ret;
12564
12565@@ -32,142 +37,237 @@ copy_user_generic(void *to, const void *from, unsigned len)
12566 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
12567 "=d" (len)),
12568 "1" (to), "2" (from), "3" (len)
12569- : "memory", "rcx", "r8", "r9", "r10", "r11");
12570+ : "memory", "rcx", "r8", "r9", "r11");
12571 return ret;
12572 }
12573
12574+static __always_inline __must_check unsigned long
12575+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
12576+static __always_inline __must_check unsigned long
12577+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
12578 __must_check unsigned long
12579-_copy_to_user(void __user *to, const void *from, unsigned len);
12580-__must_check unsigned long
12581-_copy_from_user(void *to, const void __user *from, unsigned len);
12582-__must_check unsigned long
12583-copy_in_user(void __user *to, const void __user *from, unsigned len);
12584+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
12585
12586 static inline unsigned long __must_check copy_from_user(void *to,
12587 const void __user *from,
12588+ unsigned long n) __size_overflow(3);
12589+static inline unsigned long __must_check copy_from_user(void *to,
12590+ const void __user *from,
12591 unsigned long n)
12592 {
12593- int sz = __compiletime_object_size(to);
12594-
12595 might_fault();
12596- if (likely(sz == -1 || sz >= n))
12597- n = _copy_from_user(to, from, n);
12598-#ifdef CONFIG_DEBUG_VM
12599- else
12600- WARN(1, "Buffer overflow detected!\n");
12601-#endif
12602+
12603+ if (access_ok(VERIFY_READ, from, n))
12604+ n = __copy_from_user(to, from, n);
12605+ else if (n < INT_MAX) {
12606+ if (!__builtin_constant_p(n))
12607+ check_object_size(to, n, false);
12608+ memset(to, 0, n);
12609+ }
12610 return n;
12611 }
12612
12613 static __always_inline __must_check
12614-int copy_to_user(void __user *dst, const void *src, unsigned size)
12615+int copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
12616+static __always_inline __must_check
12617+int copy_to_user(void __user *dst, const void *src, unsigned long size)
12618 {
12619 might_fault();
12620
12621- return _copy_to_user(dst, src, size);
12622+ if (access_ok(VERIFY_WRITE, dst, size))
12623+ size = __copy_to_user(dst, src, size);
12624+ return size;
12625 }
12626
12627 static __always_inline __must_check
12628-int __copy_from_user(void *dst, const void __user *src, unsigned size)
12629+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
12630+static __always_inline __must_check
12631+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12632 {
12633- int ret = 0;
12634+ int sz = __compiletime_object_size(dst);
12635+ unsigned ret = 0;
12636
12637 might_fault();
12638- if (!__builtin_constant_p(size))
12639- return copy_user_generic(dst, (__force void *)src, size);
12640+
12641+ if (size > INT_MAX)
12642+ return size;
12643+
12644+#ifdef CONFIG_PAX_MEMORY_UDEREF
12645+ if (!__access_ok(VERIFY_READ, src, size))
12646+ return size;
12647+#endif
12648+
12649+ if (unlikely(sz != -1 && sz < size)) {
12650+#ifdef CONFIG_DEBUG_VM
12651+ WARN(1, "Buffer overflow detected!\n");
12652+#endif
12653+ return size;
12654+ }
12655+
12656+ if (!__builtin_constant_p(size)) {
12657+ check_object_size(dst, size, false);
12658+
12659+#ifdef CONFIG_PAX_MEMORY_UDEREF
12660+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12661+ src += PAX_USER_SHADOW_BASE;
12662+#endif
12663+
12664+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12665+ }
12666 switch (size) {
12667- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12668+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12669 ret, "b", "b", "=q", 1);
12670 return ret;
12671- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12672+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12673 ret, "w", "w", "=r", 2);
12674 return ret;
12675- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12676+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12677 ret, "l", "k", "=r", 4);
12678 return ret;
12679- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12680+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12681 ret, "q", "", "=r", 8);
12682 return ret;
12683 case 10:
12684- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12685+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12686 ret, "q", "", "=r", 10);
12687 if (unlikely(ret))
12688 return ret;
12689 __get_user_asm(*(u16 *)(8 + (char *)dst),
12690- (u16 __user *)(8 + (char __user *)src),
12691+ (const u16 __user *)(8 + (const char __user *)src),
12692 ret, "w", "w", "=r", 2);
12693 return ret;
12694 case 16:
12695- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12696+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12697 ret, "q", "", "=r", 16);
12698 if (unlikely(ret))
12699 return ret;
12700 __get_user_asm(*(u64 *)(8 + (char *)dst),
12701- (u64 __user *)(8 + (char __user *)src),
12702+ (const u64 __user *)(8 + (const char __user *)src),
12703 ret, "q", "", "=r", 8);
12704 return ret;
12705 default:
12706- return copy_user_generic(dst, (__force void *)src, size);
12707+
12708+#ifdef CONFIG_PAX_MEMORY_UDEREF
12709+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12710+ src += PAX_USER_SHADOW_BASE;
12711+#endif
12712+
12713+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12714 }
12715 }
12716
12717 static __always_inline __must_check
12718-int __copy_to_user(void __user *dst, const void *src, unsigned size)
12719+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
12720+static __always_inline __must_check
12721+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12722 {
12723- int ret = 0;
12724+ int sz = __compiletime_object_size(src);
12725+ unsigned ret = 0;
12726
12727 might_fault();
12728- if (!__builtin_constant_p(size))
12729- return copy_user_generic((__force void *)dst, src, size);
12730+
12731+ if (size > INT_MAX)
12732+ return size;
12733+
12734+#ifdef CONFIG_PAX_MEMORY_UDEREF
12735+ if (!__access_ok(VERIFY_WRITE, dst, size))
12736+ return size;
12737+#endif
12738+
12739+ if (unlikely(sz != -1 && sz < size)) {
12740+#ifdef CONFIG_DEBUG_VM
12741+ WARN(1, "Buffer overflow detected!\n");
12742+#endif
12743+ return size;
12744+ }
12745+
12746+ if (!__builtin_constant_p(size)) {
12747+ check_object_size(src, size, true);
12748+
12749+#ifdef CONFIG_PAX_MEMORY_UDEREF
12750+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12751+ dst += PAX_USER_SHADOW_BASE;
12752+#endif
12753+
12754+ return copy_user_generic((__force_kernel void *)dst, src, size);
12755+ }
12756 switch (size) {
12757- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12758+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12759 ret, "b", "b", "iq", 1);
12760 return ret;
12761- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12762+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12763 ret, "w", "w", "ir", 2);
12764 return ret;
12765- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12766+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12767 ret, "l", "k", "ir", 4);
12768 return ret;
12769- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12770+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12771 ret, "q", "", "er", 8);
12772 return ret;
12773 case 10:
12774- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12775+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12776 ret, "q", "", "er", 10);
12777 if (unlikely(ret))
12778 return ret;
12779 asm("":::"memory");
12780- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12781+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12782 ret, "w", "w", "ir", 2);
12783 return ret;
12784 case 16:
12785- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12786+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12787 ret, "q", "", "er", 16);
12788 if (unlikely(ret))
12789 return ret;
12790 asm("":::"memory");
12791- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12792+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12793 ret, "q", "", "er", 8);
12794 return ret;
12795 default:
12796- return copy_user_generic((__force void *)dst, src, size);
12797+
12798+#ifdef CONFIG_PAX_MEMORY_UDEREF
12799+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12800+ dst += PAX_USER_SHADOW_BASE;
12801+#endif
12802+
12803+ return copy_user_generic((__force_kernel void *)dst, src, size);
12804 }
12805 }
12806
12807 static __always_inline __must_check
12808-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12809+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size) __size_overflow(3);
12810+static __always_inline __must_check
12811+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12812 {
12813- int ret = 0;
12814+ unsigned ret = 0;
12815
12816 might_fault();
12817- if (!__builtin_constant_p(size))
12818- return copy_user_generic((__force void *)dst,
12819- (__force void *)src, size);
12820+
12821+ if (size > INT_MAX)
12822+ return size;
12823+
12824+#ifdef CONFIG_PAX_MEMORY_UDEREF
12825+ if (!__access_ok(VERIFY_READ, src, size))
12826+ return size;
12827+ if (!__access_ok(VERIFY_WRITE, dst, size))
12828+ return size;
12829+#endif
12830+
12831+ if (!__builtin_constant_p(size)) {
12832+
12833+#ifdef CONFIG_PAX_MEMORY_UDEREF
12834+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12835+ src += PAX_USER_SHADOW_BASE;
12836+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12837+ dst += PAX_USER_SHADOW_BASE;
12838+#endif
12839+
12840+ return copy_user_generic((__force_kernel void *)dst,
12841+ (__force_kernel const void *)src, size);
12842+ }
12843 switch (size) {
12844 case 1: {
12845 u8 tmp;
12846- __get_user_asm(tmp, (u8 __user *)src,
12847+ __get_user_asm(tmp, (const u8 __user *)src,
12848 ret, "b", "b", "=q", 1);
12849 if (likely(!ret))
12850 __put_user_asm(tmp, (u8 __user *)dst,
12851@@ -176,7 +276,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12852 }
12853 case 2: {
12854 u16 tmp;
12855- __get_user_asm(tmp, (u16 __user *)src,
12856+ __get_user_asm(tmp, (const u16 __user *)src,
12857 ret, "w", "w", "=r", 2);
12858 if (likely(!ret))
12859 __put_user_asm(tmp, (u16 __user *)dst,
12860@@ -186,7 +286,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12861
12862 case 4: {
12863 u32 tmp;
12864- __get_user_asm(tmp, (u32 __user *)src,
12865+ __get_user_asm(tmp, (const u32 __user *)src,
12866 ret, "l", "k", "=r", 4);
12867 if (likely(!ret))
12868 __put_user_asm(tmp, (u32 __user *)dst,
12869@@ -195,7 +295,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12870 }
12871 case 8: {
12872 u64 tmp;
12873- __get_user_asm(tmp, (u64 __user *)src,
12874+ __get_user_asm(tmp, (const u64 __user *)src,
12875 ret, "q", "", "=r", 8);
12876 if (likely(!ret))
12877 __put_user_asm(tmp, (u64 __user *)dst,
12878@@ -203,8 +303,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12879 return ret;
12880 }
12881 default:
12882- return copy_user_generic((__force void *)dst,
12883- (__force void *)src, size);
12884+
12885+#ifdef CONFIG_PAX_MEMORY_UDEREF
12886+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12887+ src += PAX_USER_SHADOW_BASE;
12888+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12889+ dst += PAX_USER_SHADOW_BASE;
12890+#endif
12891+
12892+ return copy_user_generic((__force_kernel void *)dst,
12893+ (__force_kernel const void *)src, size);
12894 }
12895 }
12896
12897@@ -215,39 +323,83 @@ __strncpy_from_user(char *dst, const char __user *src, long count);
12898 __must_check long strnlen_user(const char __user *str, long n);
12899 __must_check long __strnlen_user(const char __user *str, long n);
12900 __must_check long strlen_user(const char __user *str);
12901-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
12902-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12903+__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
12904+__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
12905
12906 static __must_check __always_inline int
12907-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
12908+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
12909+static __must_check __always_inline int
12910+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
12911 {
12912- return copy_user_generic(dst, (__force const void *)src, size);
12913+ if (size > INT_MAX)
12914+ return size;
12915+
12916+#ifdef CONFIG_PAX_MEMORY_UDEREF
12917+ if (!__access_ok(VERIFY_READ, src, size))
12918+ return size;
12919+
12920+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12921+ src += PAX_USER_SHADOW_BASE;
12922+#endif
12923+
12924+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12925 }
12926
12927-static __must_check __always_inline int
12928-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12929+static __must_check __always_inline unsigned long
12930+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
12931+static __must_check __always_inline unsigned long
12932+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
12933 {
12934- return copy_user_generic((__force void *)dst, src, size);
12935+ if (size > INT_MAX)
12936+ return size;
12937+
12938+#ifdef CONFIG_PAX_MEMORY_UDEREF
12939+ if (!__access_ok(VERIFY_WRITE, dst, size))
12940+ return size;
12941+
12942+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12943+ dst += PAX_USER_SHADOW_BASE;
12944+#endif
12945+
12946+ return copy_user_generic((__force_kernel void *)dst, src, size);
12947 }
12948
12949-extern long __copy_user_nocache(void *dst, const void __user *src,
12950- unsigned size, int zerorest);
12951+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12952+ unsigned long size, int zerorest) __size_overflow(3);
12953
12954-static inline int
12955-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12956+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
12957+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
12958 {
12959 might_sleep();
12960+
12961+ if (size > INT_MAX)
12962+ return size;
12963+
12964+#ifdef CONFIG_PAX_MEMORY_UDEREF
12965+ if (!__access_ok(VERIFY_READ, src, size))
12966+ return size;
12967+#endif
12968+
12969 return __copy_user_nocache(dst, src, size, 1);
12970 }
12971
12972-static inline int
12973-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12974- unsigned size)
12975+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12976+ unsigned long size) __size_overflow(3);
12977+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12978+ unsigned long size)
12979 {
12980+ if (size > INT_MAX)
12981+ return size;
12982+
12983+#ifdef CONFIG_PAX_MEMORY_UDEREF
12984+ if (!__access_ok(VERIFY_READ, src, size))
12985+ return size;
12986+#endif
12987+
12988 return __copy_user_nocache(dst, src, size, 0);
12989 }
12990
12991-unsigned long
12992-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12993+extern unsigned long
12994+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
12995
12996 #endif /* _ASM_X86_UACCESS_64_H */
12997diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12998index bb05228..d763d5b 100644
12999--- a/arch/x86/include/asm/vdso.h
13000+++ b/arch/x86/include/asm/vdso.h
13001@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13002 #define VDSO32_SYMBOL(base, name) \
13003 ({ \
13004 extern const char VDSO32_##name[]; \
13005- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13006+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13007 })
13008 #endif
13009
13010diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13011index 517d476..a1cb4d9 100644
13012--- a/arch/x86/include/asm/x86_init.h
13013+++ b/arch/x86/include/asm/x86_init.h
13014@@ -29,7 +29,7 @@ struct x86_init_mpparse {
13015 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13016 void (*find_smp_config)(void);
13017 void (*get_smp_config)(unsigned int early);
13018-};
13019+} __no_const;
13020
13021 /**
13022 * struct x86_init_resources - platform specific resource related ops
13023@@ -43,7 +43,7 @@ struct x86_init_resources {
13024 void (*probe_roms)(void);
13025 void (*reserve_resources)(void);
13026 char *(*memory_setup)(void);
13027-};
13028+} __no_const;
13029
13030 /**
13031 * struct x86_init_irqs - platform specific interrupt setup
13032@@ -56,7 +56,7 @@ struct x86_init_irqs {
13033 void (*pre_vector_init)(void);
13034 void (*intr_init)(void);
13035 void (*trap_init)(void);
13036-};
13037+} __no_const;
13038
13039 /**
13040 * struct x86_init_oem - oem platform specific customizing functions
13041@@ -66,7 +66,7 @@ struct x86_init_irqs {
13042 struct x86_init_oem {
13043 void (*arch_setup)(void);
13044 void (*banner)(void);
13045-};
13046+} __no_const;
13047
13048 /**
13049 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13050@@ -77,7 +77,7 @@ struct x86_init_oem {
13051 */
13052 struct x86_init_mapping {
13053 void (*pagetable_reserve)(u64 start, u64 end);
13054-};
13055+} __no_const;
13056
13057 /**
13058 * struct x86_init_paging - platform specific paging functions
13059@@ -87,7 +87,7 @@ struct x86_init_mapping {
13060 struct x86_init_paging {
13061 void (*pagetable_setup_start)(pgd_t *base);
13062 void (*pagetable_setup_done)(pgd_t *base);
13063-};
13064+} __no_const;
13065
13066 /**
13067 * struct x86_init_timers - platform specific timer setup
13068@@ -102,7 +102,7 @@ struct x86_init_timers {
13069 void (*tsc_pre_init)(void);
13070 void (*timer_init)(void);
13071 void (*wallclock_init)(void);
13072-};
13073+} __no_const;
13074
13075 /**
13076 * struct x86_init_iommu - platform specific iommu setup
13077@@ -110,7 +110,7 @@ struct x86_init_timers {
13078 */
13079 struct x86_init_iommu {
13080 int (*iommu_init)(void);
13081-};
13082+} __no_const;
13083
13084 /**
13085 * struct x86_init_pci - platform specific pci init functions
13086@@ -124,7 +124,7 @@ struct x86_init_pci {
13087 int (*init)(void);
13088 void (*init_irq)(void);
13089 void (*fixup_irqs)(void);
13090-};
13091+} __no_const;
13092
13093 /**
13094 * struct x86_init_ops - functions for platform specific setup
13095@@ -140,7 +140,7 @@ struct x86_init_ops {
13096 struct x86_init_timers timers;
13097 struct x86_init_iommu iommu;
13098 struct x86_init_pci pci;
13099-};
13100+} __no_const;
13101
13102 /**
13103 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13104@@ -149,7 +149,7 @@ struct x86_init_ops {
13105 struct x86_cpuinit_ops {
13106 void (*setup_percpu_clockev)(void);
13107 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13108-};
13109+} __no_const;
13110
13111 /**
13112 * struct x86_platform_ops - platform specific runtime functions
13113@@ -171,7 +171,7 @@ struct x86_platform_ops {
13114 void (*nmi_init)(void);
13115 unsigned char (*get_nmi_reason)(void);
13116 int (*i8042_detect)(void);
13117-};
13118+} __no_const;
13119
13120 struct pci_dev;
13121
13122@@ -180,7 +180,7 @@ struct x86_msi_ops {
13123 void (*teardown_msi_irq)(unsigned int irq);
13124 void (*teardown_msi_irqs)(struct pci_dev *dev);
13125 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13126-};
13127+} __no_const;
13128
13129 extern struct x86_init_ops x86_init;
13130 extern struct x86_cpuinit_ops x86_cpuinit;
13131diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13132index c6ce245..ffbdab7 100644
13133--- a/arch/x86/include/asm/xsave.h
13134+++ b/arch/x86/include/asm/xsave.h
13135@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13136 {
13137 int err;
13138
13139+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13140+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13141+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13142+#endif
13143+
13144 /*
13145 * Clear the xsave header first, so that reserved fields are
13146 * initialized to zero.
13147@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13148 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13149 {
13150 int err;
13151- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13152+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13153 u32 lmask = mask;
13154 u32 hmask = mask >> 32;
13155
13156+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13157+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13158+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13159+#endif
13160+
13161 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13162 "2:\n"
13163 ".section .fixup,\"ax\"\n"
13164diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13165index 6a564ac..9b1340c 100644
13166--- a/arch/x86/kernel/acpi/realmode/Makefile
13167+++ b/arch/x86/kernel/acpi/realmode/Makefile
13168@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13169 $(call cc-option, -fno-stack-protector) \
13170 $(call cc-option, -mpreferred-stack-boundary=2)
13171 KBUILD_CFLAGS += $(call cc-option, -m32)
13172+ifdef CONSTIFY_PLUGIN
13173+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13174+endif
13175 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13176 GCOV_PROFILE := n
13177
13178diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13179index b4fd836..4358fe3 100644
13180--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13181+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13182@@ -108,6 +108,9 @@ wakeup_code:
13183 /* Do any other stuff... */
13184
13185 #ifndef CONFIG_64BIT
13186+ /* Recheck NX bit overrides (64bit path does this in trampoline */
13187+ call verify_cpu
13188+
13189 /* This could also be done in C code... */
13190 movl pmode_cr3, %eax
13191 movl %eax, %cr3
13192@@ -131,6 +134,7 @@ wakeup_code:
13193 movl pmode_cr0, %eax
13194 movl %eax, %cr0
13195 jmp pmode_return
13196+# include "../../verify_cpu.S"
13197 #else
13198 pushw $0
13199 pushw trampoline_segment
13200diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13201index 103b6ab..2004d0a 100644
13202--- a/arch/x86/kernel/acpi/sleep.c
13203+++ b/arch/x86/kernel/acpi/sleep.c
13204@@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
13205 header->trampoline_segment = trampoline_address() >> 4;
13206 #ifdef CONFIG_SMP
13207 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13208+
13209+ pax_open_kernel();
13210 early_gdt_descr.address =
13211 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13212+ pax_close_kernel();
13213+
13214 initial_gs = per_cpu_offset(smp_processor_id());
13215 #endif
13216 initial_code = (unsigned long)wakeup_long64;
13217diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13218index 13ab720..95d5442 100644
13219--- a/arch/x86/kernel/acpi/wakeup_32.S
13220+++ b/arch/x86/kernel/acpi/wakeup_32.S
13221@@ -30,13 +30,11 @@ wakeup_pmode_return:
13222 # and restore the stack ... but you need gdt for this to work
13223 movl saved_context_esp, %esp
13224
13225- movl %cs:saved_magic, %eax
13226- cmpl $0x12345678, %eax
13227+ cmpl $0x12345678, saved_magic
13228 jne bogus_magic
13229
13230 # jump to place where we left off
13231- movl saved_eip, %eax
13232- jmp *%eax
13233+ jmp *(saved_eip)
13234
13235 bogus_magic:
13236 jmp bogus_magic
13237diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13238index 1f84794..e23f862 100644
13239--- a/arch/x86/kernel/alternative.c
13240+++ b/arch/x86/kernel/alternative.c
13241@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13242 */
13243 for (a = start; a < end; a++) {
13244 instr = (u8 *)&a->instr_offset + a->instr_offset;
13245+
13246+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13247+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13248+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13249+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13250+#endif
13251+
13252 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13253 BUG_ON(a->replacementlen > a->instrlen);
13254 BUG_ON(a->instrlen > sizeof(insnbuf));
13255@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13256 for (poff = start; poff < end; poff++) {
13257 u8 *ptr = (u8 *)poff + *poff;
13258
13259+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13260+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13261+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13262+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13263+#endif
13264+
13265 if (!*poff || ptr < text || ptr >= text_end)
13266 continue;
13267 /* turn DS segment override prefix into lock prefix */
13268- if (*ptr == 0x3e)
13269+ if (*ktla_ktva(ptr) == 0x3e)
13270 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13271 };
13272 mutex_unlock(&text_mutex);
13273@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13274 for (poff = start; poff < end; poff++) {
13275 u8 *ptr = (u8 *)poff + *poff;
13276
13277+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13278+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13279+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13280+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13281+#endif
13282+
13283 if (!*poff || ptr < text || ptr >= text_end)
13284 continue;
13285 /* turn lock prefix into DS segment override prefix */
13286- if (*ptr == 0xf0)
13287+ if (*ktla_ktva(ptr) == 0xf0)
13288 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13289 };
13290 mutex_unlock(&text_mutex);
13291@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13292
13293 BUG_ON(p->len > MAX_PATCH_LEN);
13294 /* prep the buffer with the original instructions */
13295- memcpy(insnbuf, p->instr, p->len);
13296+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13297 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13298 (unsigned long)p->instr, p->len);
13299
13300@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13301 if (smp_alt_once)
13302 free_init_pages("SMP alternatives",
13303 (unsigned long)__smp_locks,
13304- (unsigned long)__smp_locks_end);
13305+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13306
13307 restart_nmi();
13308 }
13309@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13310 * instructions. And on the local CPU you need to be protected again NMI or MCE
13311 * handlers seeing an inconsistent instruction while you patch.
13312 */
13313-void *__init_or_module text_poke_early(void *addr, const void *opcode,
13314+void *__kprobes text_poke_early(void *addr, const void *opcode,
13315 size_t len)
13316 {
13317 unsigned long flags;
13318 local_irq_save(flags);
13319- memcpy(addr, opcode, len);
13320+
13321+ pax_open_kernel();
13322+ memcpy(ktla_ktva(addr), opcode, len);
13323 sync_core();
13324+ pax_close_kernel();
13325+
13326 local_irq_restore(flags);
13327 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13328 that causes hangs on some VIA CPUs. */
13329@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13330 */
13331 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13332 {
13333- unsigned long flags;
13334- char *vaddr;
13335+ unsigned char *vaddr = ktla_ktva(addr);
13336 struct page *pages[2];
13337- int i;
13338+ size_t i;
13339
13340 if (!core_kernel_text((unsigned long)addr)) {
13341- pages[0] = vmalloc_to_page(addr);
13342- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13343+ pages[0] = vmalloc_to_page(vaddr);
13344+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13345 } else {
13346- pages[0] = virt_to_page(addr);
13347+ pages[0] = virt_to_page(vaddr);
13348 WARN_ON(!PageReserved(pages[0]));
13349- pages[1] = virt_to_page(addr + PAGE_SIZE);
13350+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13351 }
13352 BUG_ON(!pages[0]);
13353- local_irq_save(flags);
13354- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13355- if (pages[1])
13356- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13357- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13358- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13359- clear_fixmap(FIX_TEXT_POKE0);
13360- if (pages[1])
13361- clear_fixmap(FIX_TEXT_POKE1);
13362- local_flush_tlb();
13363- sync_core();
13364- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13365- that causes hangs on some VIA CPUs. */
13366+ text_poke_early(addr, opcode, len);
13367 for (i = 0; i < len; i++)
13368- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13369- local_irq_restore(flags);
13370+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13371 return addr;
13372 }
13373
13374diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13375index 2eec05b..fef012b 100644
13376--- a/arch/x86/kernel/apic/apic.c
13377+++ b/arch/x86/kernel/apic/apic.c
13378@@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13379 /*
13380 * Debug level, exported for io_apic.c
13381 */
13382-unsigned int apic_verbosity;
13383+int apic_verbosity;
13384
13385 int pic_mode;
13386
13387@@ -1908,7 +1908,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13388 apic_write(APIC_ESR, 0);
13389 v1 = apic_read(APIC_ESR);
13390 ack_APIC_irq();
13391- atomic_inc(&irq_err_count);
13392+ atomic_inc_unchecked(&irq_err_count);
13393
13394 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13395 smp_processor_id(), v0 , v1);
13396diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13397index 6d10a66..f4687b8 100644
13398--- a/arch/x86/kernel/apic/io_apic.c
13399+++ b/arch/x86/kernel/apic/io_apic.c
13400@@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13401 }
13402 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13403
13404-void lock_vector_lock(void)
13405+void lock_vector_lock(void) __acquires(vector_lock)
13406 {
13407 /* Used to the online set of cpus does not change
13408 * during assign_irq_vector.
13409@@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
13410 raw_spin_lock(&vector_lock);
13411 }
13412
13413-void unlock_vector_lock(void)
13414+void unlock_vector_lock(void) __releases(vector_lock)
13415 {
13416 raw_spin_unlock(&vector_lock);
13417 }
13418@@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
13419 ack_APIC_irq();
13420 }
13421
13422-atomic_t irq_mis_count;
13423+atomic_unchecked_t irq_mis_count;
13424
13425 static void ack_apic_level(struct irq_data *data)
13426 {
13427@@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
13428 * at the cpu.
13429 */
13430 if (!(v & (1 << (i & 0x1f)))) {
13431- atomic_inc(&irq_mis_count);
13432+ atomic_inc_unchecked(&irq_mis_count);
13433
13434 eoi_ioapic_irq(irq, cfg);
13435 }
13436diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13437index f76623c..aab694f 100644
13438--- a/arch/x86/kernel/apm_32.c
13439+++ b/arch/x86/kernel/apm_32.c
13440@@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
13441 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13442 * even though they are called in protected mode.
13443 */
13444-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13445+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13446 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13447
13448 static const char driver_version[] = "1.16ac"; /* no spaces */
13449@@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
13450 BUG_ON(cpu != 0);
13451 gdt = get_cpu_gdt_table(cpu);
13452 save_desc_40 = gdt[0x40 / 8];
13453+
13454+ pax_open_kernel();
13455 gdt[0x40 / 8] = bad_bios_desc;
13456+ pax_close_kernel();
13457
13458 apm_irq_save(flags);
13459 APM_DO_SAVE_SEGS;
13460@@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
13461 &call->esi);
13462 APM_DO_RESTORE_SEGS;
13463 apm_irq_restore(flags);
13464+
13465+ pax_open_kernel();
13466 gdt[0x40 / 8] = save_desc_40;
13467+ pax_close_kernel();
13468+
13469 put_cpu();
13470
13471 return call->eax & 0xff;
13472@@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
13473 BUG_ON(cpu != 0);
13474 gdt = get_cpu_gdt_table(cpu);
13475 save_desc_40 = gdt[0x40 / 8];
13476+
13477+ pax_open_kernel();
13478 gdt[0x40 / 8] = bad_bios_desc;
13479+ pax_close_kernel();
13480
13481 apm_irq_save(flags);
13482 APM_DO_SAVE_SEGS;
13483@@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
13484 &call->eax);
13485 APM_DO_RESTORE_SEGS;
13486 apm_irq_restore(flags);
13487+
13488+ pax_open_kernel();
13489 gdt[0x40 / 8] = save_desc_40;
13490+ pax_close_kernel();
13491+
13492 put_cpu();
13493 return error;
13494 }
13495@@ -2347,12 +2361,15 @@ static int __init apm_init(void)
13496 * code to that CPU.
13497 */
13498 gdt = get_cpu_gdt_table(0);
13499+
13500+ pax_open_kernel();
13501 set_desc_base(&gdt[APM_CS >> 3],
13502 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13503 set_desc_base(&gdt[APM_CS_16 >> 3],
13504 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13505 set_desc_base(&gdt[APM_DS >> 3],
13506 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13507+ pax_close_kernel();
13508
13509 proc_create("apm", 0, NULL, &apm_file_ops);
13510
13511diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
13512index 68de2dc..1f3c720 100644
13513--- a/arch/x86/kernel/asm-offsets.c
13514+++ b/arch/x86/kernel/asm-offsets.c
13515@@ -33,6 +33,8 @@ void common(void) {
13516 OFFSET(TI_status, thread_info, status);
13517 OFFSET(TI_addr_limit, thread_info, addr_limit);
13518 OFFSET(TI_preempt_count, thread_info, preempt_count);
13519+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13520+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13521
13522 BLANK();
13523 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
13524@@ -53,8 +55,26 @@ void common(void) {
13525 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13526 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13527 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13528+
13529+#ifdef CONFIG_PAX_KERNEXEC
13530+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13531 #endif
13532
13533+#ifdef CONFIG_PAX_MEMORY_UDEREF
13534+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13535+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13536+#ifdef CONFIG_X86_64
13537+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13538+#endif
13539+#endif
13540+
13541+#endif
13542+
13543+ BLANK();
13544+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13545+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13546+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13547+
13548 #ifdef CONFIG_XEN
13549 BLANK();
13550 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13551diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13552index 834e897..dacddc8 100644
13553--- a/arch/x86/kernel/asm-offsets_64.c
13554+++ b/arch/x86/kernel/asm-offsets_64.c
13555@@ -70,6 +70,7 @@ int main(void)
13556 BLANK();
13557 #undef ENTRY
13558
13559+ DEFINE(TSS_size, sizeof(struct tss_struct));
13560 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
13561 BLANK();
13562
13563diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13564index 25f24dc..4094a7f 100644
13565--- a/arch/x86/kernel/cpu/Makefile
13566+++ b/arch/x86/kernel/cpu/Makefile
13567@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
13568 CFLAGS_REMOVE_perf_event.o = -pg
13569 endif
13570
13571-# Make sure load_percpu_segment has no stackprotector
13572-nostackp := $(call cc-option, -fno-stack-protector)
13573-CFLAGS_common.o := $(nostackp)
13574-
13575 obj-y := intel_cacheinfo.o scattered.o topology.o
13576 obj-y += proc.o capflags.o powerflags.o common.o
13577 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
13578diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13579index f4773f4..b3fb13c 100644
13580--- a/arch/x86/kernel/cpu/amd.c
13581+++ b/arch/x86/kernel/cpu/amd.c
13582@@ -669,7 +669,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13583 unsigned int size)
13584 {
13585 /* AMD errata T13 (order #21922) */
13586- if ((c->x86 == 6)) {
13587+ if (c->x86 == 6) {
13588 /* Duron Rev A0 */
13589 if (c->x86_model == 3 && c->x86_mask == 0)
13590 size = 64;
13591diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13592index c0f7d68..aa418f9 100644
13593--- a/arch/x86/kernel/cpu/common.c
13594+++ b/arch/x86/kernel/cpu/common.c
13595@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13596
13597 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13598
13599-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13600-#ifdef CONFIG_X86_64
13601- /*
13602- * We need valid kernel segments for data and code in long mode too
13603- * IRET will check the segment types kkeil 2000/10/28
13604- * Also sysret mandates a special GDT layout
13605- *
13606- * TLS descriptors are currently at a different place compared to i386.
13607- * Hopefully nobody expects them at a fixed place (Wine?)
13608- */
13609- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13610- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13611- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13612- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13613- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13614- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13615-#else
13616- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13617- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13618- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13619- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13620- /*
13621- * Segments used for calling PnP BIOS have byte granularity.
13622- * They code segments and data segments have fixed 64k limits,
13623- * the transfer segment sizes are set at run time.
13624- */
13625- /* 32-bit code */
13626- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13627- /* 16-bit code */
13628- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13629- /* 16-bit data */
13630- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13631- /* 16-bit data */
13632- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13633- /* 16-bit data */
13634- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13635- /*
13636- * The APM segments have byte granularity and their bases
13637- * are set at run time. All have 64k limits.
13638- */
13639- /* 32-bit code */
13640- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13641- /* 16-bit code */
13642- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13643- /* data */
13644- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13645-
13646- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13647- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13648- GDT_STACK_CANARY_INIT
13649-#endif
13650-} };
13651-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13652-
13653 static int __init x86_xsave_setup(char *s)
13654 {
13655 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13656@@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
13657 {
13658 struct desc_ptr gdt_descr;
13659
13660- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13661+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13662 gdt_descr.size = GDT_SIZE - 1;
13663 load_gdt(&gdt_descr);
13664 /* Reload the per-cpu base */
13665@@ -839,6 +785,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13666 /* Filter out anything that depends on CPUID levels we don't have */
13667 filter_cpuid_features(c, true);
13668
13669+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13670+ setup_clear_cpu_cap(X86_FEATURE_SEP);
13671+#endif
13672+
13673 /* If the model name is still unset, do table lookup. */
13674 if (!c->x86_model_id[0]) {
13675 const char *p;
13676@@ -1019,10 +969,12 @@ static __init int setup_disablecpuid(char *arg)
13677 }
13678 __setup("clearcpuid=", setup_disablecpuid);
13679
13680+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13681+EXPORT_PER_CPU_SYMBOL(current_tinfo);
13682+
13683 #ifdef CONFIG_X86_64
13684 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13685-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
13686- (unsigned long) nmi_idt_table };
13687+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
13688
13689 DEFINE_PER_CPU_FIRST(union irq_stack_union,
13690 irq_stack_union) __aligned(PAGE_SIZE);
13691@@ -1036,7 +988,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13692 EXPORT_PER_CPU_SYMBOL(current_task);
13693
13694 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13695- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13696+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13697 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13698
13699 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13700@@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13701 {
13702 memset(regs, 0, sizeof(struct pt_regs));
13703 regs->fs = __KERNEL_PERCPU;
13704- regs->gs = __KERNEL_STACK_CANARY;
13705+ savesegment(gs, regs->gs);
13706
13707 return regs;
13708 }
13709@@ -1190,7 +1142,7 @@ void __cpuinit cpu_init(void)
13710 int i;
13711
13712 cpu = stack_smp_processor_id();
13713- t = &per_cpu(init_tss, cpu);
13714+ t = init_tss + cpu;
13715 oist = &per_cpu(orig_ist, cpu);
13716
13717 #ifdef CONFIG_NUMA
13718@@ -1216,7 +1168,7 @@ void __cpuinit cpu_init(void)
13719 switch_to_new_gdt(cpu);
13720 loadsegment(fs, 0);
13721
13722- load_idt((const struct desc_ptr *)&idt_descr);
13723+ load_idt(&idt_descr);
13724
13725 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13726 syscall_init();
13727@@ -1225,7 +1177,6 @@ void __cpuinit cpu_init(void)
13728 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13729 barrier();
13730
13731- x86_configure_nx();
13732 if (cpu != 0)
13733 enable_x2apic();
13734
13735@@ -1281,7 +1232,7 @@ void __cpuinit cpu_init(void)
13736 {
13737 int cpu = smp_processor_id();
13738 struct task_struct *curr = current;
13739- struct tss_struct *t = &per_cpu(init_tss, cpu);
13740+ struct tss_struct *t = init_tss + cpu;
13741 struct thread_struct *thread = &curr->thread;
13742
13743 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13744diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13745index 3e6ff6c..54b4992 100644
13746--- a/arch/x86/kernel/cpu/intel.c
13747+++ b/arch/x86/kernel/cpu/intel.c
13748@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13749 * Update the IDT descriptor and reload the IDT so that
13750 * it uses the read-only mapped virtual address.
13751 */
13752- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13753+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13754 load_idt(&idt_descr);
13755 }
13756 #endif
13757diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13758index fc4beb3..f20a5a7 100644
13759--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
13760+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13761@@ -199,6 +199,8 @@ static void raise_mce(struct mce *m)
13762
13763 /* Error injection interface */
13764 static ssize_t mce_write(struct file *filp, const char __user *ubuf,
13765+ size_t usize, loff_t *off) __size_overflow(3);
13766+static ssize_t mce_write(struct file *filp, const char __user *ubuf,
13767 size_t usize, loff_t *off)
13768 {
13769 struct mce m;
13770diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13771index 5a11ae2..a1a1c8a 100644
13772--- a/arch/x86/kernel/cpu/mcheck/mce.c
13773+++ b/arch/x86/kernel/cpu/mcheck/mce.c
13774@@ -42,6 +42,7 @@
13775 #include <asm/processor.h>
13776 #include <asm/mce.h>
13777 #include <asm/msr.h>
13778+#include <asm/local.h>
13779
13780 #include "mce-internal.h"
13781
13782@@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
13783 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13784 m->cs, m->ip);
13785
13786- if (m->cs == __KERNEL_CS)
13787+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13788 print_symbol("{%s}", m->ip);
13789 pr_cont("\n");
13790 }
13791@@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
13792
13793 #define PANIC_TIMEOUT 5 /* 5 seconds */
13794
13795-static atomic_t mce_paniced;
13796+static atomic_unchecked_t mce_paniced;
13797
13798 static int fake_panic;
13799-static atomic_t mce_fake_paniced;
13800+static atomic_unchecked_t mce_fake_paniced;
13801
13802 /* Panic in progress. Enable interrupts and wait for final IPI */
13803 static void wait_for_panic(void)
13804@@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13805 /*
13806 * Make sure only one CPU runs in machine check panic
13807 */
13808- if (atomic_inc_return(&mce_paniced) > 1)
13809+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13810 wait_for_panic();
13811 barrier();
13812
13813@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13814 console_verbose();
13815 } else {
13816 /* Don't log too much for fake panic */
13817- if (atomic_inc_return(&mce_fake_paniced) > 1)
13818+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13819 return;
13820 }
13821 /* First print corrected ones that are still unlogged */
13822@@ -658,7 +659,7 @@ static int mce_timed_out(u64 *t)
13823 * might have been modified by someone else.
13824 */
13825 rmb();
13826- if (atomic_read(&mce_paniced))
13827+ if (atomic_read_unchecked(&mce_paniced))
13828 wait_for_panic();
13829 if (!monarch_timeout)
13830 goto out;
13831@@ -1446,7 +1447,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13832 }
13833
13834 /* Call the installed machine check handler for this CPU setup. */
13835-void (*machine_check_vector)(struct pt_regs *, long error_code) =
13836+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13837 unexpected_machine_check;
13838
13839 /*
13840@@ -1469,7 +1470,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
13841 return;
13842 }
13843
13844+ pax_open_kernel();
13845 machine_check_vector = do_machine_check;
13846+ pax_close_kernel();
13847
13848 __mcheck_cpu_init_generic();
13849 __mcheck_cpu_init_vendor(c);
13850@@ -1483,7 +1486,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
13851 */
13852
13853 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
13854-static int mce_chrdev_open_count; /* #times opened */
13855+static local_t mce_chrdev_open_count; /* #times opened */
13856 static int mce_chrdev_open_exclu; /* already open exclusive? */
13857
13858 static int mce_chrdev_open(struct inode *inode, struct file *file)
13859@@ -1491,7 +1494,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
13860 spin_lock(&mce_chrdev_state_lock);
13861
13862 if (mce_chrdev_open_exclu ||
13863- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
13864+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
13865 spin_unlock(&mce_chrdev_state_lock);
13866
13867 return -EBUSY;
13868@@ -1499,7 +1502,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
13869
13870 if (file->f_flags & O_EXCL)
13871 mce_chrdev_open_exclu = 1;
13872- mce_chrdev_open_count++;
13873+ local_inc(&mce_chrdev_open_count);
13874
13875 spin_unlock(&mce_chrdev_state_lock);
13876
13877@@ -1510,7 +1513,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
13878 {
13879 spin_lock(&mce_chrdev_state_lock);
13880
13881- mce_chrdev_open_count--;
13882+ local_dec(&mce_chrdev_open_count);
13883 mce_chrdev_open_exclu = 0;
13884
13885 spin_unlock(&mce_chrdev_state_lock);
13886@@ -2229,7 +2232,7 @@ struct dentry *mce_get_debugfs_dir(void)
13887 static void mce_reset(void)
13888 {
13889 cpu_missing = 0;
13890- atomic_set(&mce_fake_paniced, 0);
13891+ atomic_set_unchecked(&mce_fake_paniced, 0);
13892 atomic_set(&mce_executing, 0);
13893 atomic_set(&mce_callin, 0);
13894 atomic_set(&global_nwo, 0);
13895diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13896index 5c0e653..0882b0a 100644
13897--- a/arch/x86/kernel/cpu/mcheck/p5.c
13898+++ b/arch/x86/kernel/cpu/mcheck/p5.c
13899@@ -12,6 +12,7 @@
13900 #include <asm/system.h>
13901 #include <asm/mce.h>
13902 #include <asm/msr.h>
13903+#include <asm/pgtable.h>
13904
13905 /* By default disabled */
13906 int mce_p5_enabled __read_mostly;
13907@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13908 if (!cpu_has(c, X86_FEATURE_MCE))
13909 return;
13910
13911+ pax_open_kernel();
13912 machine_check_vector = pentium_machine_check;
13913+ pax_close_kernel();
13914 /* Make sure the vector pointer is visible before we enable MCEs: */
13915 wmb();
13916
13917diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13918index 54060f5..c1a7577 100644
13919--- a/arch/x86/kernel/cpu/mcheck/winchip.c
13920+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13921@@ -11,6 +11,7 @@
13922 #include <asm/system.h>
13923 #include <asm/mce.h>
13924 #include <asm/msr.h>
13925+#include <asm/pgtable.h>
13926
13927 /* Machine check handler for WinChip C6: */
13928 static void winchip_machine_check(struct pt_regs *regs, long error_code)
13929@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13930 {
13931 u32 lo, hi;
13932
13933+ pax_open_kernel();
13934 machine_check_vector = winchip_machine_check;
13935+ pax_close_kernel();
13936 /* Make sure the vector pointer is visible before we enable MCEs: */
13937 wmb();
13938
13939diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
13940index 7928963..1b16001 100644
13941--- a/arch/x86/kernel/cpu/mtrr/if.c
13942+++ b/arch/x86/kernel/cpu/mtrr/if.c
13943@@ -91,6 +91,8 @@ mtrr_file_del(unsigned long base, unsigned long size,
13944 * "base=%Lx size=%Lx type=%s" or "disable=%d"
13945 */
13946 static ssize_t
13947+mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) __size_overflow(3);
13948+static ssize_t
13949 mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
13950 {
13951 int i, err;
13952diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
13953index 6b96110..0da73eb 100644
13954--- a/arch/x86/kernel/cpu/mtrr/main.c
13955+++ b/arch/x86/kernel/cpu/mtrr/main.c
13956@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
13957 u64 size_or_mask, size_and_mask;
13958 static bool mtrr_aps_delayed_init;
13959
13960-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
13961+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
13962
13963 const struct mtrr_ops *mtrr_if;
13964
13965diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
13966index df5e41f..816c719 100644
13967--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
13968+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
13969@@ -25,7 +25,7 @@ struct mtrr_ops {
13970 int (*validate_add_page)(unsigned long base, unsigned long size,
13971 unsigned int type);
13972 int (*have_wrcomb)(void);
13973-};
13974+} __do_const;
13975
13976 extern int generic_get_free_region(unsigned long base, unsigned long size,
13977 int replace_reg);
13978diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
13979index 5adce10..99284ec 100644
13980--- a/arch/x86/kernel/cpu/perf_event.c
13981+++ b/arch/x86/kernel/cpu/perf_event.c
13982@@ -1665,7 +1665,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
13983 break;
13984
13985 perf_callchain_store(entry, frame.return_address);
13986- fp = frame.next_frame;
13987+ fp = (const void __force_user *)frame.next_frame;
13988 }
13989 }
13990
13991diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
13992index 13ad899..f642b9a 100644
13993--- a/arch/x86/kernel/crash.c
13994+++ b/arch/x86/kernel/crash.c
13995@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
13996 {
13997 #ifdef CONFIG_X86_32
13998 struct pt_regs fixed_regs;
13999-#endif
14000
14001-#ifdef CONFIG_X86_32
14002- if (!user_mode_vm(regs)) {
14003+ if (!user_mode(regs)) {
14004 crash_fixup_ss_esp(&fixed_regs, regs);
14005 regs = &fixed_regs;
14006 }
14007diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14008index 37250fe..bf2ec74 100644
14009--- a/arch/x86/kernel/doublefault_32.c
14010+++ b/arch/x86/kernel/doublefault_32.c
14011@@ -11,7 +11,7 @@
14012
14013 #define DOUBLEFAULT_STACKSIZE (1024)
14014 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14015-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14016+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14017
14018 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14019
14020@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14021 unsigned long gdt, tss;
14022
14023 store_gdt(&gdt_desc);
14024- gdt = gdt_desc.address;
14025+ gdt = (unsigned long)gdt_desc.address;
14026
14027 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14028
14029@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14030 /* 0x2 bit is always set */
14031 .flags = X86_EFLAGS_SF | 0x2,
14032 .sp = STACK_START,
14033- .es = __USER_DS,
14034+ .es = __KERNEL_DS,
14035 .cs = __KERNEL_CS,
14036 .ss = __KERNEL_DS,
14037- .ds = __USER_DS,
14038+ .ds = __KERNEL_DS,
14039 .fs = __KERNEL_PERCPU,
14040
14041 .__cr3 = __pa_nodebug(swapper_pg_dir),
14042diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14043index 4025fe4..d8451c6 100644
14044--- a/arch/x86/kernel/dumpstack.c
14045+++ b/arch/x86/kernel/dumpstack.c
14046@@ -2,6 +2,9 @@
14047 * Copyright (C) 1991, 1992 Linus Torvalds
14048 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14049 */
14050+#ifdef CONFIG_GRKERNSEC_HIDESYM
14051+#define __INCLUDED_BY_HIDESYM 1
14052+#endif
14053 #include <linux/kallsyms.h>
14054 #include <linux/kprobes.h>
14055 #include <linux/uaccess.h>
14056@@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
14057 static void
14058 print_ftrace_graph_addr(unsigned long addr, void *data,
14059 const struct stacktrace_ops *ops,
14060- struct thread_info *tinfo, int *graph)
14061+ struct task_struct *task, int *graph)
14062 {
14063- struct task_struct *task = tinfo->task;
14064 unsigned long ret_addr;
14065 int index = task->curr_ret_stack;
14066
14067@@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14068 static inline void
14069 print_ftrace_graph_addr(unsigned long addr, void *data,
14070 const struct stacktrace_ops *ops,
14071- struct thread_info *tinfo, int *graph)
14072+ struct task_struct *task, int *graph)
14073 { }
14074 #endif
14075
14076@@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14077 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14078 */
14079
14080-static inline int valid_stack_ptr(struct thread_info *tinfo,
14081- void *p, unsigned int size, void *end)
14082+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14083 {
14084- void *t = tinfo;
14085 if (end) {
14086 if (p < end && p >= (end-THREAD_SIZE))
14087 return 1;
14088@@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14089 }
14090
14091 unsigned long
14092-print_context_stack(struct thread_info *tinfo,
14093+print_context_stack(struct task_struct *task, void *stack_start,
14094 unsigned long *stack, unsigned long bp,
14095 const struct stacktrace_ops *ops, void *data,
14096 unsigned long *end, int *graph)
14097 {
14098 struct stack_frame *frame = (struct stack_frame *)bp;
14099
14100- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14101+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14102 unsigned long addr;
14103
14104 addr = *stack;
14105@@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
14106 } else {
14107 ops->address(data, addr, 0);
14108 }
14109- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14110+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14111 }
14112 stack++;
14113 }
14114@@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
14115 EXPORT_SYMBOL_GPL(print_context_stack);
14116
14117 unsigned long
14118-print_context_stack_bp(struct thread_info *tinfo,
14119+print_context_stack_bp(struct task_struct *task, void *stack_start,
14120 unsigned long *stack, unsigned long bp,
14121 const struct stacktrace_ops *ops, void *data,
14122 unsigned long *end, int *graph)
14123@@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14124 struct stack_frame *frame = (struct stack_frame *)bp;
14125 unsigned long *ret_addr = &frame->return_address;
14126
14127- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14128+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14129 unsigned long addr = *ret_addr;
14130
14131 if (!__kernel_text_address(addr))
14132@@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14133 ops->address(data, addr, 1);
14134 frame = frame->next_frame;
14135 ret_addr = &frame->return_address;
14136- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14137+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14138 }
14139
14140 return (unsigned long)frame;
14141@@ -186,7 +186,7 @@ void dump_stack(void)
14142
14143 bp = stack_frame(current, NULL);
14144 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14145- current->pid, current->comm, print_tainted(),
14146+ task_pid_nr(current), current->comm, print_tainted(),
14147 init_utsname()->release,
14148 (int)strcspn(init_utsname()->version, " "),
14149 init_utsname()->version);
14150@@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
14151 }
14152 EXPORT_SYMBOL_GPL(oops_begin);
14153
14154+extern void gr_handle_kernel_exploit(void);
14155+
14156 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14157 {
14158 if (regs && kexec_should_crash(current))
14159@@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14160 panic("Fatal exception in interrupt");
14161 if (panic_on_oops)
14162 panic("Fatal exception");
14163- do_exit(signr);
14164+
14165+ gr_handle_kernel_exploit();
14166+
14167+ do_group_exit(signr);
14168 }
14169
14170 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14171@@ -270,7 +275,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14172
14173 show_registers(regs);
14174 #ifdef CONFIG_X86_32
14175- if (user_mode_vm(regs)) {
14176+ if (user_mode(regs)) {
14177 sp = regs->sp;
14178 ss = regs->ss & 0xffff;
14179 } else {
14180@@ -298,7 +303,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14181 unsigned long flags = oops_begin();
14182 int sig = SIGSEGV;
14183
14184- if (!user_mode_vm(regs))
14185+ if (!user_mode(regs))
14186 report_bug(regs->ip, regs);
14187
14188 if (__die(str, regs, err))
14189diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14190index c99f9ed..2a15d80 100644
14191--- a/arch/x86/kernel/dumpstack_32.c
14192+++ b/arch/x86/kernel/dumpstack_32.c
14193@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14194 bp = stack_frame(task, regs);
14195
14196 for (;;) {
14197- struct thread_info *context;
14198+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14199
14200- context = (struct thread_info *)
14201- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14202- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14203+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14204
14205- stack = (unsigned long *)context->previous_esp;
14206- if (!stack)
14207+ if (stack_start == task_stack_page(task))
14208 break;
14209+ stack = *(unsigned long **)stack_start;
14210 if (ops->stack(data, "IRQ") < 0)
14211 break;
14212 touch_nmi_watchdog();
14213@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14214 * When in-kernel, we also print out the stack and code at the
14215 * time of the fault..
14216 */
14217- if (!user_mode_vm(regs)) {
14218+ if (!user_mode(regs)) {
14219 unsigned int code_prologue = code_bytes * 43 / 64;
14220 unsigned int code_len = code_bytes;
14221 unsigned char c;
14222 u8 *ip;
14223+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14224
14225 printk(KERN_EMERG "Stack:\n");
14226 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14227
14228 printk(KERN_EMERG "Code: ");
14229
14230- ip = (u8 *)regs->ip - code_prologue;
14231+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14232 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14233 /* try starting at IP */
14234- ip = (u8 *)regs->ip;
14235+ ip = (u8 *)regs->ip + cs_base;
14236 code_len = code_len - code_prologue + 1;
14237 }
14238 for (i = 0; i < code_len; i++, ip++) {
14239@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14240 printk(KERN_CONT " Bad EIP value.");
14241 break;
14242 }
14243- if (ip == (u8 *)regs->ip)
14244+ if (ip == (u8 *)regs->ip + cs_base)
14245 printk(KERN_CONT "<%02x> ", c);
14246 else
14247 printk(KERN_CONT "%02x ", c);
14248@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14249 {
14250 unsigned short ud2;
14251
14252+ ip = ktla_ktva(ip);
14253 if (ip < PAGE_OFFSET)
14254 return 0;
14255 if (probe_kernel_address((unsigned short *)ip, ud2))
14256@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14257
14258 return ud2 == 0x0b0f;
14259 }
14260+
14261+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14262+void pax_check_alloca(unsigned long size)
14263+{
14264+ unsigned long sp = (unsigned long)&sp, stack_left;
14265+
14266+ /* all kernel stacks are of the same size */
14267+ stack_left = sp & (THREAD_SIZE - 1);
14268+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14269+}
14270+EXPORT_SYMBOL(pax_check_alloca);
14271+#endif
14272diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14273index 17107bd..b2deecf 100644
14274--- a/arch/x86/kernel/dumpstack_64.c
14275+++ b/arch/x86/kernel/dumpstack_64.c
14276@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14277 unsigned long *irq_stack_end =
14278 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14279 unsigned used = 0;
14280- struct thread_info *tinfo;
14281 int graph = 0;
14282 unsigned long dummy;
14283+ void *stack_start;
14284
14285 if (!task)
14286 task = current;
14287@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14288 * current stack address. If the stacks consist of nested
14289 * exceptions
14290 */
14291- tinfo = task_thread_info(task);
14292 for (;;) {
14293 char *id;
14294 unsigned long *estack_end;
14295+
14296 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14297 &used, &id);
14298
14299@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14300 if (ops->stack(data, id) < 0)
14301 break;
14302
14303- bp = ops->walk_stack(tinfo, stack, bp, ops,
14304+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14305 data, estack_end, &graph);
14306 ops->stack(data, "<EOE>");
14307 /*
14308@@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14309 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14310 if (ops->stack(data, "IRQ") < 0)
14311 break;
14312- bp = ops->walk_stack(tinfo, stack, bp,
14313+ bp = ops->walk_stack(task, irq_stack, stack, bp,
14314 ops, data, irq_stack_end, &graph);
14315 /*
14316 * We link to the next stack (which would be
14317@@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14318 /*
14319 * This handles the process stack:
14320 */
14321- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14322+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14323+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14324 put_cpu();
14325 }
14326 EXPORT_SYMBOL(dump_trace);
14327@@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
14328
14329 return ud2 == 0x0b0f;
14330 }
14331+
14332+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14333+void pax_check_alloca(unsigned long size)
14334+{
14335+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14336+ unsigned cpu, used;
14337+ char *id;
14338+
14339+ /* check the process stack first */
14340+ stack_start = (unsigned long)task_stack_page(current);
14341+ stack_end = stack_start + THREAD_SIZE;
14342+ if (likely(stack_start <= sp && sp < stack_end)) {
14343+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14344+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14345+ return;
14346+ }
14347+
14348+ cpu = get_cpu();
14349+
14350+ /* check the irq stacks */
14351+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14352+ stack_start = stack_end - IRQ_STACK_SIZE;
14353+ if (stack_start <= sp && sp < stack_end) {
14354+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14355+ put_cpu();
14356+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14357+ return;
14358+ }
14359+
14360+ /* check the exception stacks */
14361+ used = 0;
14362+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14363+ stack_start = stack_end - EXCEPTION_STKSZ;
14364+ if (stack_end && stack_start <= sp && sp < stack_end) {
14365+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14366+ put_cpu();
14367+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14368+ return;
14369+ }
14370+
14371+ put_cpu();
14372+
14373+ /* unknown stack */
14374+ BUG();
14375+}
14376+EXPORT_SYMBOL(pax_check_alloca);
14377+#endif
14378diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14379index 9b9f18b..9fcaa04 100644
14380--- a/arch/x86/kernel/early_printk.c
14381+++ b/arch/x86/kernel/early_printk.c
14382@@ -7,6 +7,7 @@
14383 #include <linux/pci_regs.h>
14384 #include <linux/pci_ids.h>
14385 #include <linux/errno.h>
14386+#include <linux/sched.h>
14387 #include <asm/io.h>
14388 #include <asm/processor.h>
14389 #include <asm/fcntl.h>
14390diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14391index 7b784f4..76aaad7 100644
14392--- a/arch/x86/kernel/entry_32.S
14393+++ b/arch/x86/kernel/entry_32.S
14394@@ -179,13 +179,146 @@
14395 /*CFI_REL_OFFSET gs, PT_GS*/
14396 .endm
14397 .macro SET_KERNEL_GS reg
14398+
14399+#ifdef CONFIG_CC_STACKPROTECTOR
14400 movl $(__KERNEL_STACK_CANARY), \reg
14401+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14402+ movl $(__USER_DS), \reg
14403+#else
14404+ xorl \reg, \reg
14405+#endif
14406+
14407 movl \reg, %gs
14408 .endm
14409
14410 #endif /* CONFIG_X86_32_LAZY_GS */
14411
14412-.macro SAVE_ALL
14413+.macro pax_enter_kernel
14414+#ifdef CONFIG_PAX_KERNEXEC
14415+ call pax_enter_kernel
14416+#endif
14417+.endm
14418+
14419+.macro pax_exit_kernel
14420+#ifdef CONFIG_PAX_KERNEXEC
14421+ call pax_exit_kernel
14422+#endif
14423+.endm
14424+
14425+#ifdef CONFIG_PAX_KERNEXEC
14426+ENTRY(pax_enter_kernel)
14427+#ifdef CONFIG_PARAVIRT
14428+ pushl %eax
14429+ pushl %ecx
14430+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14431+ mov %eax, %esi
14432+#else
14433+ mov %cr0, %esi
14434+#endif
14435+ bts $16, %esi
14436+ jnc 1f
14437+ mov %cs, %esi
14438+ cmp $__KERNEL_CS, %esi
14439+ jz 3f
14440+ ljmp $__KERNEL_CS, $3f
14441+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14442+2:
14443+#ifdef CONFIG_PARAVIRT
14444+ mov %esi, %eax
14445+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14446+#else
14447+ mov %esi, %cr0
14448+#endif
14449+3:
14450+#ifdef CONFIG_PARAVIRT
14451+ popl %ecx
14452+ popl %eax
14453+#endif
14454+ ret
14455+ENDPROC(pax_enter_kernel)
14456+
14457+ENTRY(pax_exit_kernel)
14458+#ifdef CONFIG_PARAVIRT
14459+ pushl %eax
14460+ pushl %ecx
14461+#endif
14462+ mov %cs, %esi
14463+ cmp $__KERNEXEC_KERNEL_CS, %esi
14464+ jnz 2f
14465+#ifdef CONFIG_PARAVIRT
14466+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14467+ mov %eax, %esi
14468+#else
14469+ mov %cr0, %esi
14470+#endif
14471+ btr $16, %esi
14472+ ljmp $__KERNEL_CS, $1f
14473+1:
14474+#ifdef CONFIG_PARAVIRT
14475+ mov %esi, %eax
14476+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14477+#else
14478+ mov %esi, %cr0
14479+#endif
14480+2:
14481+#ifdef CONFIG_PARAVIRT
14482+ popl %ecx
14483+ popl %eax
14484+#endif
14485+ ret
14486+ENDPROC(pax_exit_kernel)
14487+#endif
14488+
14489+.macro pax_erase_kstack
14490+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14491+ call pax_erase_kstack
14492+#endif
14493+.endm
14494+
14495+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14496+/*
14497+ * ebp: thread_info
14498+ * ecx, edx: can be clobbered
14499+ */
14500+ENTRY(pax_erase_kstack)
14501+ pushl %edi
14502+ pushl %eax
14503+
14504+ mov TI_lowest_stack(%ebp), %edi
14505+ mov $-0xBEEF, %eax
14506+ std
14507+
14508+1: mov %edi, %ecx
14509+ and $THREAD_SIZE_asm - 1, %ecx
14510+ shr $2, %ecx
14511+ repne scasl
14512+ jecxz 2f
14513+
14514+ cmp $2*16, %ecx
14515+ jc 2f
14516+
14517+ mov $2*16, %ecx
14518+ repe scasl
14519+ jecxz 2f
14520+ jne 1b
14521+
14522+2: cld
14523+ mov %esp, %ecx
14524+ sub %edi, %ecx
14525+ shr $2, %ecx
14526+ rep stosl
14527+
14528+ mov TI_task_thread_sp0(%ebp), %edi
14529+ sub $128, %edi
14530+ mov %edi, TI_lowest_stack(%ebp)
14531+
14532+ popl %eax
14533+ popl %edi
14534+ ret
14535+ENDPROC(pax_erase_kstack)
14536+#endif
14537+
14538+.macro __SAVE_ALL _DS
14539 cld
14540 PUSH_GS
14541 pushl_cfi %fs
14542@@ -208,7 +341,7 @@
14543 CFI_REL_OFFSET ecx, 0
14544 pushl_cfi %ebx
14545 CFI_REL_OFFSET ebx, 0
14546- movl $(__USER_DS), %edx
14547+ movl $\_DS, %edx
14548 movl %edx, %ds
14549 movl %edx, %es
14550 movl $(__KERNEL_PERCPU), %edx
14551@@ -216,6 +349,15 @@
14552 SET_KERNEL_GS %edx
14553 .endm
14554
14555+.macro SAVE_ALL
14556+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
14557+ __SAVE_ALL __KERNEL_DS
14558+ pax_enter_kernel
14559+#else
14560+ __SAVE_ALL __USER_DS
14561+#endif
14562+.endm
14563+
14564 .macro RESTORE_INT_REGS
14565 popl_cfi %ebx
14566 CFI_RESTORE ebx
14567@@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
14568 popfl_cfi
14569 jmp syscall_exit
14570 CFI_ENDPROC
14571-END(ret_from_fork)
14572+ENDPROC(ret_from_fork)
14573
14574 /*
14575 * Interrupt exit functions should be protected against kprobes
14576@@ -335,7 +477,15 @@ resume_userspace_sig:
14577 andl $SEGMENT_RPL_MASK, %eax
14578 #endif
14579 cmpl $USER_RPL, %eax
14580+
14581+#ifdef CONFIG_PAX_KERNEXEC
14582+ jae resume_userspace
14583+
14584+ PAX_EXIT_KERNEL
14585+ jmp resume_kernel
14586+#else
14587 jb resume_kernel # not returning to v8086 or userspace
14588+#endif
14589
14590 ENTRY(resume_userspace)
14591 LOCKDEP_SYS_EXIT
14592@@ -347,8 +497,8 @@ ENTRY(resume_userspace)
14593 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
14594 # int/exception return?
14595 jne work_pending
14596- jmp restore_all
14597-END(ret_from_exception)
14598+ jmp restore_all_pax
14599+ENDPROC(ret_from_exception)
14600
14601 #ifdef CONFIG_PREEMPT
14602 ENTRY(resume_kernel)
14603@@ -363,7 +513,7 @@ need_resched:
14604 jz restore_all
14605 call preempt_schedule_irq
14606 jmp need_resched
14607-END(resume_kernel)
14608+ENDPROC(resume_kernel)
14609 #endif
14610 CFI_ENDPROC
14611 /*
14612@@ -397,23 +547,34 @@ sysenter_past_esp:
14613 /*CFI_REL_OFFSET cs, 0*/
14614 /*
14615 * Push current_thread_info()->sysenter_return to the stack.
14616- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
14617- * pushed above; +8 corresponds to copy_thread's esp0 setting.
14618 */
14619- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
14620+ pushl_cfi $0
14621 CFI_REL_OFFSET eip, 0
14622
14623 pushl_cfi %eax
14624 SAVE_ALL
14625+ GET_THREAD_INFO(%ebp)
14626+ movl TI_sysenter_return(%ebp),%ebp
14627+ movl %ebp,PT_EIP(%esp)
14628 ENABLE_INTERRUPTS(CLBR_NONE)
14629
14630 /*
14631 * Load the potential sixth argument from user stack.
14632 * Careful about security.
14633 */
14634+ movl PT_OLDESP(%esp),%ebp
14635+
14636+#ifdef CONFIG_PAX_MEMORY_UDEREF
14637+ mov PT_OLDSS(%esp),%ds
14638+1: movl %ds:(%ebp),%ebp
14639+ push %ss
14640+ pop %ds
14641+#else
14642 cmpl $__PAGE_OFFSET-3,%ebp
14643 jae syscall_fault
14644 1: movl (%ebp),%ebp
14645+#endif
14646+
14647 movl %ebp,PT_EBP(%esp)
14648 .section __ex_table,"a"
14649 .align 4
14650@@ -436,12 +597,24 @@ sysenter_do_call:
14651 testl $_TIF_ALLWORK_MASK, %ecx
14652 jne sysexit_audit
14653 sysenter_exit:
14654+
14655+#ifdef CONFIG_PAX_RANDKSTACK
14656+ pushl_cfi %eax
14657+ movl %esp, %eax
14658+ call pax_randomize_kstack
14659+ popl_cfi %eax
14660+#endif
14661+
14662+ pax_erase_kstack
14663+
14664 /* if something modifies registers it must also disable sysexit */
14665 movl PT_EIP(%esp), %edx
14666 movl PT_OLDESP(%esp), %ecx
14667 xorl %ebp,%ebp
14668 TRACE_IRQS_ON
14669 1: mov PT_FS(%esp), %fs
14670+2: mov PT_DS(%esp), %ds
14671+3: mov PT_ES(%esp), %es
14672 PTGS_TO_GS
14673 ENABLE_INTERRUPTS_SYSEXIT
14674
14675@@ -458,6 +631,9 @@ sysenter_audit:
14676 movl %eax,%edx /* 2nd arg: syscall number */
14677 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
14678 call __audit_syscall_entry
14679+
14680+ pax_erase_kstack
14681+
14682 pushl_cfi %ebx
14683 movl PT_EAX(%esp),%eax /* reload syscall number */
14684 jmp sysenter_do_call
14685@@ -483,11 +659,17 @@ sysexit_audit:
14686
14687 CFI_ENDPROC
14688 .pushsection .fixup,"ax"
14689-2: movl $0,PT_FS(%esp)
14690+4: movl $0,PT_FS(%esp)
14691+ jmp 1b
14692+5: movl $0,PT_DS(%esp)
14693+ jmp 1b
14694+6: movl $0,PT_ES(%esp)
14695 jmp 1b
14696 .section __ex_table,"a"
14697 .align 4
14698- .long 1b,2b
14699+ .long 1b,4b
14700+ .long 2b,5b
14701+ .long 3b,6b
14702 .popsection
14703 PTGS_TO_GS_EX
14704 ENDPROC(ia32_sysenter_target)
14705@@ -520,6 +702,15 @@ syscall_exit:
14706 testl $_TIF_ALLWORK_MASK, %ecx # current->work
14707 jne syscall_exit_work
14708
14709+restore_all_pax:
14710+
14711+#ifdef CONFIG_PAX_RANDKSTACK
14712+ movl %esp, %eax
14713+ call pax_randomize_kstack
14714+#endif
14715+
14716+ pax_erase_kstack
14717+
14718 restore_all:
14719 TRACE_IRQS_IRET
14720 restore_all_notrace:
14721@@ -579,14 +770,34 @@ ldt_ss:
14722 * compensating for the offset by changing to the ESPFIX segment with
14723 * a base address that matches for the difference.
14724 */
14725-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
14726+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
14727 mov %esp, %edx /* load kernel esp */
14728 mov PT_OLDESP(%esp), %eax /* load userspace esp */
14729 mov %dx, %ax /* eax: new kernel esp */
14730 sub %eax, %edx /* offset (low word is 0) */
14731+#ifdef CONFIG_SMP
14732+ movl PER_CPU_VAR(cpu_number), %ebx
14733+ shll $PAGE_SHIFT_asm, %ebx
14734+ addl $cpu_gdt_table, %ebx
14735+#else
14736+ movl $cpu_gdt_table, %ebx
14737+#endif
14738 shr $16, %edx
14739- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
14740- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
14741+
14742+#ifdef CONFIG_PAX_KERNEXEC
14743+ mov %cr0, %esi
14744+ btr $16, %esi
14745+ mov %esi, %cr0
14746+#endif
14747+
14748+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
14749+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
14750+
14751+#ifdef CONFIG_PAX_KERNEXEC
14752+ bts $16, %esi
14753+ mov %esi, %cr0
14754+#endif
14755+
14756 pushl_cfi $__ESPFIX_SS
14757 pushl_cfi %eax /* new kernel esp */
14758 /* Disable interrupts, but do not irqtrace this section: we
14759@@ -615,38 +826,30 @@ work_resched:
14760 movl TI_flags(%ebp), %ecx
14761 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
14762 # than syscall tracing?
14763- jz restore_all
14764+ jz restore_all_pax
14765 testb $_TIF_NEED_RESCHED, %cl
14766 jnz work_resched
14767
14768 work_notifysig: # deal with pending signals and
14769 # notify-resume requests
14770+ movl %esp, %eax
14771 #ifdef CONFIG_VM86
14772 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
14773- movl %esp, %eax
14774- jne work_notifysig_v86 # returning to kernel-space or
14775+ jz 1f # returning to kernel-space or
14776 # vm86-space
14777- TRACE_IRQS_ON
14778- ENABLE_INTERRUPTS(CLBR_NONE)
14779- xorl %edx, %edx
14780- call do_notify_resume
14781- jmp resume_userspace_sig
14782
14783- ALIGN
14784-work_notifysig_v86:
14785 pushl_cfi %ecx # save ti_flags for do_notify_resume
14786 call save_v86_state # %eax contains pt_regs pointer
14787 popl_cfi %ecx
14788 movl %eax, %esp
14789-#else
14790- movl %esp, %eax
14791+1:
14792 #endif
14793 TRACE_IRQS_ON
14794 ENABLE_INTERRUPTS(CLBR_NONE)
14795 xorl %edx, %edx
14796 call do_notify_resume
14797 jmp resume_userspace_sig
14798-END(work_pending)
14799+ENDPROC(work_pending)
14800
14801 # perform syscall exit tracing
14802 ALIGN
14803@@ -654,11 +857,14 @@ syscall_trace_entry:
14804 movl $-ENOSYS,PT_EAX(%esp)
14805 movl %esp, %eax
14806 call syscall_trace_enter
14807+
14808+ pax_erase_kstack
14809+
14810 /* What it returned is what we'll actually use. */
14811 cmpl $(NR_syscalls), %eax
14812 jnae syscall_call
14813 jmp syscall_exit
14814-END(syscall_trace_entry)
14815+ENDPROC(syscall_trace_entry)
14816
14817 # perform syscall exit tracing
14818 ALIGN
14819@@ -671,20 +877,24 @@ syscall_exit_work:
14820 movl %esp, %eax
14821 call syscall_trace_leave
14822 jmp resume_userspace
14823-END(syscall_exit_work)
14824+ENDPROC(syscall_exit_work)
14825 CFI_ENDPROC
14826
14827 RING0_INT_FRAME # can't unwind into user space anyway
14828 syscall_fault:
14829+#ifdef CONFIG_PAX_MEMORY_UDEREF
14830+ push %ss
14831+ pop %ds
14832+#endif
14833 GET_THREAD_INFO(%ebp)
14834 movl $-EFAULT,PT_EAX(%esp)
14835 jmp resume_userspace
14836-END(syscall_fault)
14837+ENDPROC(syscall_fault)
14838
14839 syscall_badsys:
14840 movl $-ENOSYS,PT_EAX(%esp)
14841 jmp resume_userspace
14842-END(syscall_badsys)
14843+ENDPROC(syscall_badsys)
14844 CFI_ENDPROC
14845 /*
14846 * End of kprobes section
14847@@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
14848 CFI_ENDPROC
14849 ENDPROC(ptregs_clone)
14850
14851+ ALIGN;
14852+ENTRY(kernel_execve)
14853+ CFI_STARTPROC
14854+ pushl_cfi %ebp
14855+ sub $PT_OLDSS+4,%esp
14856+ pushl_cfi %edi
14857+ pushl_cfi %ecx
14858+ pushl_cfi %eax
14859+ lea 3*4(%esp),%edi
14860+ mov $PT_OLDSS/4+1,%ecx
14861+ xorl %eax,%eax
14862+ rep stosl
14863+ popl_cfi %eax
14864+ popl_cfi %ecx
14865+ popl_cfi %edi
14866+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
14867+ pushl_cfi %esp
14868+ call sys_execve
14869+ add $4,%esp
14870+ CFI_ADJUST_CFA_OFFSET -4
14871+ GET_THREAD_INFO(%ebp)
14872+ test %eax,%eax
14873+ jz syscall_exit
14874+ add $PT_OLDSS+4,%esp
14875+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
14876+ popl_cfi %ebp
14877+ ret
14878+ CFI_ENDPROC
14879+ENDPROC(kernel_execve)
14880+
14881 .macro FIXUP_ESPFIX_STACK
14882 /*
14883 * Switch back for ESPFIX stack to the normal zerobased stack
14884@@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
14885 * normal stack and adjusts ESP with the matching offset.
14886 */
14887 /* fixup the stack */
14888- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
14889- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
14890+#ifdef CONFIG_SMP
14891+ movl PER_CPU_VAR(cpu_number), %ebx
14892+ shll $PAGE_SHIFT_asm, %ebx
14893+ addl $cpu_gdt_table, %ebx
14894+#else
14895+ movl $cpu_gdt_table, %ebx
14896+#endif
14897+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
14898+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
14899 shl $16, %eax
14900 addl %esp, %eax /* the adjusted stack pointer */
14901 pushl_cfi $__KERNEL_DS
14902@@ -819,7 +1066,7 @@ vector=vector+1
14903 .endr
14904 2: jmp common_interrupt
14905 .endr
14906-END(irq_entries_start)
14907+ENDPROC(irq_entries_start)
14908
14909 .previous
14910 END(interrupt)
14911@@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
14912 pushl_cfi $do_coprocessor_error
14913 jmp error_code
14914 CFI_ENDPROC
14915-END(coprocessor_error)
14916+ENDPROC(coprocessor_error)
14917
14918 ENTRY(simd_coprocessor_error)
14919 RING0_INT_FRAME
14920@@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
14921 #endif
14922 jmp error_code
14923 CFI_ENDPROC
14924-END(simd_coprocessor_error)
14925+ENDPROC(simd_coprocessor_error)
14926
14927 ENTRY(device_not_available)
14928 RING0_INT_FRAME
14929@@ -896,7 +1143,7 @@ ENTRY(device_not_available)
14930 pushl_cfi $do_device_not_available
14931 jmp error_code
14932 CFI_ENDPROC
14933-END(device_not_available)
14934+ENDPROC(device_not_available)
14935
14936 #ifdef CONFIG_PARAVIRT
14937 ENTRY(native_iret)
14938@@ -905,12 +1152,12 @@ ENTRY(native_iret)
14939 .align 4
14940 .long native_iret, iret_exc
14941 .previous
14942-END(native_iret)
14943+ENDPROC(native_iret)
14944
14945 ENTRY(native_irq_enable_sysexit)
14946 sti
14947 sysexit
14948-END(native_irq_enable_sysexit)
14949+ENDPROC(native_irq_enable_sysexit)
14950 #endif
14951
14952 ENTRY(overflow)
14953@@ -919,7 +1166,7 @@ ENTRY(overflow)
14954 pushl_cfi $do_overflow
14955 jmp error_code
14956 CFI_ENDPROC
14957-END(overflow)
14958+ENDPROC(overflow)
14959
14960 ENTRY(bounds)
14961 RING0_INT_FRAME
14962@@ -927,7 +1174,7 @@ ENTRY(bounds)
14963 pushl_cfi $do_bounds
14964 jmp error_code
14965 CFI_ENDPROC
14966-END(bounds)
14967+ENDPROC(bounds)
14968
14969 ENTRY(invalid_op)
14970 RING0_INT_FRAME
14971@@ -935,7 +1182,7 @@ ENTRY(invalid_op)
14972 pushl_cfi $do_invalid_op
14973 jmp error_code
14974 CFI_ENDPROC
14975-END(invalid_op)
14976+ENDPROC(invalid_op)
14977
14978 ENTRY(coprocessor_segment_overrun)
14979 RING0_INT_FRAME
14980@@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
14981 pushl_cfi $do_coprocessor_segment_overrun
14982 jmp error_code
14983 CFI_ENDPROC
14984-END(coprocessor_segment_overrun)
14985+ENDPROC(coprocessor_segment_overrun)
14986
14987 ENTRY(invalid_TSS)
14988 RING0_EC_FRAME
14989 pushl_cfi $do_invalid_TSS
14990 jmp error_code
14991 CFI_ENDPROC
14992-END(invalid_TSS)
14993+ENDPROC(invalid_TSS)
14994
14995 ENTRY(segment_not_present)
14996 RING0_EC_FRAME
14997 pushl_cfi $do_segment_not_present
14998 jmp error_code
14999 CFI_ENDPROC
15000-END(segment_not_present)
15001+ENDPROC(segment_not_present)
15002
15003 ENTRY(stack_segment)
15004 RING0_EC_FRAME
15005 pushl_cfi $do_stack_segment
15006 jmp error_code
15007 CFI_ENDPROC
15008-END(stack_segment)
15009+ENDPROC(stack_segment)
15010
15011 ENTRY(alignment_check)
15012 RING0_EC_FRAME
15013 pushl_cfi $do_alignment_check
15014 jmp error_code
15015 CFI_ENDPROC
15016-END(alignment_check)
15017+ENDPROC(alignment_check)
15018
15019 ENTRY(divide_error)
15020 RING0_INT_FRAME
15021@@ -979,7 +1226,7 @@ ENTRY(divide_error)
15022 pushl_cfi $do_divide_error
15023 jmp error_code
15024 CFI_ENDPROC
15025-END(divide_error)
15026+ENDPROC(divide_error)
15027
15028 #ifdef CONFIG_X86_MCE
15029 ENTRY(machine_check)
15030@@ -988,7 +1235,7 @@ ENTRY(machine_check)
15031 pushl_cfi machine_check_vector
15032 jmp error_code
15033 CFI_ENDPROC
15034-END(machine_check)
15035+ENDPROC(machine_check)
15036 #endif
15037
15038 ENTRY(spurious_interrupt_bug)
15039@@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
15040 pushl_cfi $do_spurious_interrupt_bug
15041 jmp error_code
15042 CFI_ENDPROC
15043-END(spurious_interrupt_bug)
15044+ENDPROC(spurious_interrupt_bug)
15045 /*
15046 * End of kprobes section
15047 */
15048@@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15049
15050 ENTRY(mcount)
15051 ret
15052-END(mcount)
15053+ENDPROC(mcount)
15054
15055 ENTRY(ftrace_caller)
15056 cmpl $0, function_trace_stop
15057@@ -1141,7 +1388,7 @@ ftrace_graph_call:
15058 .globl ftrace_stub
15059 ftrace_stub:
15060 ret
15061-END(ftrace_caller)
15062+ENDPROC(ftrace_caller)
15063
15064 #else /* ! CONFIG_DYNAMIC_FTRACE */
15065
15066@@ -1177,7 +1424,7 @@ trace:
15067 popl %ecx
15068 popl %eax
15069 jmp ftrace_stub
15070-END(mcount)
15071+ENDPROC(mcount)
15072 #endif /* CONFIG_DYNAMIC_FTRACE */
15073 #endif /* CONFIG_FUNCTION_TRACER */
15074
15075@@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15076 popl %ecx
15077 popl %eax
15078 ret
15079-END(ftrace_graph_caller)
15080+ENDPROC(ftrace_graph_caller)
15081
15082 .globl return_to_handler
15083 return_to_handler:
15084@@ -1253,15 +1500,18 @@ error_code:
15085 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15086 REG_TO_PTGS %ecx
15087 SET_KERNEL_GS %ecx
15088- movl $(__USER_DS), %ecx
15089+ movl $(__KERNEL_DS), %ecx
15090 movl %ecx, %ds
15091 movl %ecx, %es
15092+
15093+ pax_enter_kernel
15094+
15095 TRACE_IRQS_OFF
15096 movl %esp,%eax # pt_regs pointer
15097 call *%edi
15098 jmp ret_from_exception
15099 CFI_ENDPROC
15100-END(page_fault)
15101+ENDPROC(page_fault)
15102
15103 /*
15104 * Debug traps and NMI can happen at the one SYSENTER instruction
15105@@ -1303,7 +1553,7 @@ debug_stack_correct:
15106 call do_debug
15107 jmp ret_from_exception
15108 CFI_ENDPROC
15109-END(debug)
15110+ENDPROC(debug)
15111
15112 /*
15113 * NMI is doubly nasty. It can happen _while_ we're handling
15114@@ -1340,6 +1590,9 @@ nmi_stack_correct:
15115 xorl %edx,%edx # zero error code
15116 movl %esp,%eax # pt_regs pointer
15117 call do_nmi
15118+
15119+ pax_exit_kernel
15120+
15121 jmp restore_all_notrace
15122 CFI_ENDPROC
15123
15124@@ -1376,12 +1629,15 @@ nmi_espfix_stack:
15125 FIXUP_ESPFIX_STACK # %eax == %esp
15126 xorl %edx,%edx # zero error code
15127 call do_nmi
15128+
15129+ pax_exit_kernel
15130+
15131 RESTORE_REGS
15132 lss 12+4(%esp), %esp # back to espfix stack
15133 CFI_ADJUST_CFA_OFFSET -24
15134 jmp irq_return
15135 CFI_ENDPROC
15136-END(nmi)
15137+ENDPROC(nmi)
15138
15139 ENTRY(int3)
15140 RING0_INT_FRAME
15141@@ -1393,14 +1649,14 @@ ENTRY(int3)
15142 call do_int3
15143 jmp ret_from_exception
15144 CFI_ENDPROC
15145-END(int3)
15146+ENDPROC(int3)
15147
15148 ENTRY(general_protection)
15149 RING0_EC_FRAME
15150 pushl_cfi $do_general_protection
15151 jmp error_code
15152 CFI_ENDPROC
15153-END(general_protection)
15154+ENDPROC(general_protection)
15155
15156 #ifdef CONFIG_KVM_GUEST
15157 ENTRY(async_page_fault)
15158@@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
15159 pushl_cfi $do_async_page_fault
15160 jmp error_code
15161 CFI_ENDPROC
15162-END(async_page_fault)
15163+ENDPROC(async_page_fault)
15164 #endif
15165
15166 /*
15167diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15168index 1333d98..b340ca2 100644
15169--- a/arch/x86/kernel/entry_64.S
15170+++ b/arch/x86/kernel/entry_64.S
15171@@ -56,6 +56,8 @@
15172 #include <asm/ftrace.h>
15173 #include <asm/percpu.h>
15174 #include <linux/err.h>
15175+#include <asm/pgtable.h>
15176+#include <asm/alternative-asm.h>
15177
15178 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15179 #include <linux/elf-em.h>
15180@@ -69,8 +71,9 @@
15181 #ifdef CONFIG_FUNCTION_TRACER
15182 #ifdef CONFIG_DYNAMIC_FTRACE
15183 ENTRY(mcount)
15184+ pax_force_retaddr
15185 retq
15186-END(mcount)
15187+ENDPROC(mcount)
15188
15189 ENTRY(ftrace_caller)
15190 cmpl $0, function_trace_stop
15191@@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15192 #endif
15193
15194 GLOBAL(ftrace_stub)
15195+ pax_force_retaddr
15196 retq
15197-END(ftrace_caller)
15198+ENDPROC(ftrace_caller)
15199
15200 #else /* ! CONFIG_DYNAMIC_FTRACE */
15201 ENTRY(mcount)
15202@@ -113,6 +117,7 @@ ENTRY(mcount)
15203 #endif
15204
15205 GLOBAL(ftrace_stub)
15206+ pax_force_retaddr
15207 retq
15208
15209 trace:
15210@@ -122,12 +127,13 @@ trace:
15211 movq 8(%rbp), %rsi
15212 subq $MCOUNT_INSN_SIZE, %rdi
15213
15214+ pax_force_fptr ftrace_trace_function
15215 call *ftrace_trace_function
15216
15217 MCOUNT_RESTORE_FRAME
15218
15219 jmp ftrace_stub
15220-END(mcount)
15221+ENDPROC(mcount)
15222 #endif /* CONFIG_DYNAMIC_FTRACE */
15223 #endif /* CONFIG_FUNCTION_TRACER */
15224
15225@@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15226
15227 MCOUNT_RESTORE_FRAME
15228
15229+ pax_force_retaddr
15230 retq
15231-END(ftrace_graph_caller)
15232+ENDPROC(ftrace_graph_caller)
15233
15234 GLOBAL(return_to_handler)
15235 subq $24, %rsp
15236@@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15237 movq 8(%rsp), %rdx
15238 movq (%rsp), %rax
15239 addq $24, %rsp
15240+ pax_force_fptr %rdi
15241 jmp *%rdi
15242 #endif
15243
15244@@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15245 ENDPROC(native_usergs_sysret64)
15246 #endif /* CONFIG_PARAVIRT */
15247
15248+ .macro ljmpq sel, off
15249+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15250+ .byte 0x48; ljmp *1234f(%rip)
15251+ .pushsection .rodata
15252+ .align 16
15253+ 1234: .quad \off; .word \sel
15254+ .popsection
15255+#else
15256+ pushq $\sel
15257+ pushq $\off
15258+ lretq
15259+#endif
15260+ .endm
15261+
15262+ .macro pax_enter_kernel
15263+ pax_set_fptr_mask
15264+#ifdef CONFIG_PAX_KERNEXEC
15265+ call pax_enter_kernel
15266+#endif
15267+ .endm
15268+
15269+ .macro pax_exit_kernel
15270+#ifdef CONFIG_PAX_KERNEXEC
15271+ call pax_exit_kernel
15272+#endif
15273+ .endm
15274+
15275+#ifdef CONFIG_PAX_KERNEXEC
15276+ENTRY(pax_enter_kernel)
15277+ pushq %rdi
15278+
15279+#ifdef CONFIG_PARAVIRT
15280+ PV_SAVE_REGS(CLBR_RDI)
15281+#endif
15282+
15283+ GET_CR0_INTO_RDI
15284+ bts $16,%rdi
15285+ jnc 3f
15286+ mov %cs,%edi
15287+ cmp $__KERNEL_CS,%edi
15288+ jnz 2f
15289+1:
15290+
15291+#ifdef CONFIG_PARAVIRT
15292+ PV_RESTORE_REGS(CLBR_RDI)
15293+#endif
15294+
15295+ popq %rdi
15296+ pax_force_retaddr
15297+ retq
15298+
15299+2: ljmpq __KERNEL_CS,1f
15300+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15301+4: SET_RDI_INTO_CR0
15302+ jmp 1b
15303+ENDPROC(pax_enter_kernel)
15304+
15305+ENTRY(pax_exit_kernel)
15306+ pushq %rdi
15307+
15308+#ifdef CONFIG_PARAVIRT
15309+ PV_SAVE_REGS(CLBR_RDI)
15310+#endif
15311+
15312+ mov %cs,%rdi
15313+ cmp $__KERNEXEC_KERNEL_CS,%edi
15314+ jz 2f
15315+1:
15316+
15317+#ifdef CONFIG_PARAVIRT
15318+ PV_RESTORE_REGS(CLBR_RDI);
15319+#endif
15320+
15321+ popq %rdi
15322+ pax_force_retaddr
15323+ retq
15324+
15325+2: GET_CR0_INTO_RDI
15326+ btr $16,%rdi
15327+ ljmpq __KERNEL_CS,3f
15328+3: SET_RDI_INTO_CR0
15329+ jmp 1b
15330+#ifdef CONFIG_PARAVIRT
15331+ PV_RESTORE_REGS(CLBR_RDI);
15332+#endif
15333+
15334+ popq %rdi
15335+ pax_force_retaddr
15336+ retq
15337+ENDPROC(pax_exit_kernel)
15338+#endif
15339+
15340+ .macro pax_enter_kernel_user
15341+ pax_set_fptr_mask
15342+#ifdef CONFIG_PAX_MEMORY_UDEREF
15343+ call pax_enter_kernel_user
15344+#endif
15345+ .endm
15346+
15347+ .macro pax_exit_kernel_user
15348+#ifdef CONFIG_PAX_MEMORY_UDEREF
15349+ call pax_exit_kernel_user
15350+#endif
15351+#ifdef CONFIG_PAX_RANDKSTACK
15352+ pushq %rax
15353+ call pax_randomize_kstack
15354+ popq %rax
15355+#endif
15356+ .endm
15357+
15358+#ifdef CONFIG_PAX_MEMORY_UDEREF
15359+ENTRY(pax_enter_kernel_user)
15360+ pushq %rdi
15361+ pushq %rbx
15362+
15363+#ifdef CONFIG_PARAVIRT
15364+ PV_SAVE_REGS(CLBR_RDI)
15365+#endif
15366+
15367+ GET_CR3_INTO_RDI
15368+ mov %rdi,%rbx
15369+ add $__START_KERNEL_map,%rbx
15370+ sub phys_base(%rip),%rbx
15371+
15372+#ifdef CONFIG_PARAVIRT
15373+ pushq %rdi
15374+ cmpl $0, pv_info+PARAVIRT_enabled
15375+ jz 1f
15376+ i = 0
15377+ .rept USER_PGD_PTRS
15378+ mov i*8(%rbx),%rsi
15379+ mov $0,%sil
15380+ lea i*8(%rbx),%rdi
15381+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15382+ i = i + 1
15383+ .endr
15384+ jmp 2f
15385+1:
15386+#endif
15387+
15388+ i = 0
15389+ .rept USER_PGD_PTRS
15390+ movb $0,i*8(%rbx)
15391+ i = i + 1
15392+ .endr
15393+
15394+#ifdef CONFIG_PARAVIRT
15395+2: popq %rdi
15396+#endif
15397+ SET_RDI_INTO_CR3
15398+
15399+#ifdef CONFIG_PAX_KERNEXEC
15400+ GET_CR0_INTO_RDI
15401+ bts $16,%rdi
15402+ SET_RDI_INTO_CR0
15403+#endif
15404+
15405+#ifdef CONFIG_PARAVIRT
15406+ PV_RESTORE_REGS(CLBR_RDI)
15407+#endif
15408+
15409+ popq %rbx
15410+ popq %rdi
15411+ pax_force_retaddr
15412+ retq
15413+ENDPROC(pax_enter_kernel_user)
15414+
15415+ENTRY(pax_exit_kernel_user)
15416+ push %rdi
15417+
15418+#ifdef CONFIG_PARAVIRT
15419+ pushq %rbx
15420+ PV_SAVE_REGS(CLBR_RDI)
15421+#endif
15422+
15423+#ifdef CONFIG_PAX_KERNEXEC
15424+ GET_CR0_INTO_RDI
15425+ btr $16,%rdi
15426+ SET_RDI_INTO_CR0
15427+#endif
15428+
15429+ GET_CR3_INTO_RDI
15430+ add $__START_KERNEL_map,%rdi
15431+ sub phys_base(%rip),%rdi
15432+
15433+#ifdef CONFIG_PARAVIRT
15434+ cmpl $0, pv_info+PARAVIRT_enabled
15435+ jz 1f
15436+ mov %rdi,%rbx
15437+ i = 0
15438+ .rept USER_PGD_PTRS
15439+ mov i*8(%rbx),%rsi
15440+ mov $0x67,%sil
15441+ lea i*8(%rbx),%rdi
15442+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15443+ i = i + 1
15444+ .endr
15445+ jmp 2f
15446+1:
15447+#endif
15448+
15449+ i = 0
15450+ .rept USER_PGD_PTRS
15451+ movb $0x67,i*8(%rdi)
15452+ i = i + 1
15453+ .endr
15454+
15455+#ifdef CONFIG_PARAVIRT
15456+2: PV_RESTORE_REGS(CLBR_RDI)
15457+ popq %rbx
15458+#endif
15459+
15460+ popq %rdi
15461+ pax_force_retaddr
15462+ retq
15463+ENDPROC(pax_exit_kernel_user)
15464+#endif
15465+
15466+.macro pax_erase_kstack
15467+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15468+ call pax_erase_kstack
15469+#endif
15470+.endm
15471+
15472+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15473+/*
15474+ * r11: thread_info
15475+ * rcx, rdx: can be clobbered
15476+ */
15477+ENTRY(pax_erase_kstack)
15478+ pushq %rdi
15479+ pushq %rax
15480+ pushq %r11
15481+
15482+ GET_THREAD_INFO(%r11)
15483+ mov TI_lowest_stack(%r11), %rdi
15484+ mov $-0xBEEF, %rax
15485+ std
15486+
15487+1: mov %edi, %ecx
15488+ and $THREAD_SIZE_asm - 1, %ecx
15489+ shr $3, %ecx
15490+ repne scasq
15491+ jecxz 2f
15492+
15493+ cmp $2*8, %ecx
15494+ jc 2f
15495+
15496+ mov $2*8, %ecx
15497+ repe scasq
15498+ jecxz 2f
15499+ jne 1b
15500+
15501+2: cld
15502+ mov %esp, %ecx
15503+ sub %edi, %ecx
15504+
15505+ cmp $THREAD_SIZE_asm, %rcx
15506+ jb 3f
15507+ ud2
15508+3:
15509+
15510+ shr $3, %ecx
15511+ rep stosq
15512+
15513+ mov TI_task_thread_sp0(%r11), %rdi
15514+ sub $256, %rdi
15515+ mov %rdi, TI_lowest_stack(%r11)
15516+
15517+ popq %r11
15518+ popq %rax
15519+ popq %rdi
15520+ pax_force_retaddr
15521+ ret
15522+ENDPROC(pax_erase_kstack)
15523+#endif
15524
15525 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15526 #ifdef CONFIG_TRACE_IRQFLAGS
15527@@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
15528 .endm
15529
15530 .macro UNFAKE_STACK_FRAME
15531- addq $8*6, %rsp
15532- CFI_ADJUST_CFA_OFFSET -(6*8)
15533+ addq $8*6 + ARG_SKIP, %rsp
15534+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
15535 .endm
15536
15537 /*
15538@@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
15539 movq %rsp, %rsi
15540
15541 leaq -RBP(%rsp),%rdi /* arg1 for handler */
15542- testl $3, CS(%rdi)
15543+ testb $3, CS(%rdi)
15544 je 1f
15545 SWAPGS
15546 /*
15547@@ -356,9 +640,10 @@ ENTRY(save_rest)
15548 movq_cfi r15, R15+16
15549 movq %r11, 8(%rsp) /* return address */
15550 FIXUP_TOP_OF_STACK %r11, 16
15551+ pax_force_retaddr
15552 ret
15553 CFI_ENDPROC
15554-END(save_rest)
15555+ENDPROC(save_rest)
15556
15557 /* save complete stack frame */
15558 .pushsection .kprobes.text, "ax"
15559@@ -387,9 +672,10 @@ ENTRY(save_paranoid)
15560 js 1f /* negative -> in kernel */
15561 SWAPGS
15562 xorl %ebx,%ebx
15563-1: ret
15564+1: pax_force_retaddr_bts
15565+ ret
15566 CFI_ENDPROC
15567-END(save_paranoid)
15568+ENDPROC(save_paranoid)
15569 .popsection
15570
15571 /*
15572@@ -411,7 +697,7 @@ ENTRY(ret_from_fork)
15573
15574 RESTORE_REST
15575
15576- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15577+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15578 jz retint_restore_args
15579
15580 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
15581@@ -421,7 +707,7 @@ ENTRY(ret_from_fork)
15582 jmp ret_from_sys_call # go to the SYSRET fastpath
15583
15584 CFI_ENDPROC
15585-END(ret_from_fork)
15586+ENDPROC(ret_from_fork)
15587
15588 /*
15589 * System call entry. Up to 6 arguments in registers are supported.
15590@@ -457,7 +743,7 @@ END(ret_from_fork)
15591 ENTRY(system_call)
15592 CFI_STARTPROC simple
15593 CFI_SIGNAL_FRAME
15594- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15595+ CFI_DEF_CFA rsp,0
15596 CFI_REGISTER rip,rcx
15597 /*CFI_REGISTER rflags,r11*/
15598 SWAPGS_UNSAFE_STACK
15599@@ -470,21 +756,23 @@ GLOBAL(system_call_after_swapgs)
15600
15601 movq %rsp,PER_CPU_VAR(old_rsp)
15602 movq PER_CPU_VAR(kernel_stack),%rsp
15603+ SAVE_ARGS 8*6,0
15604+ pax_enter_kernel_user
15605 /*
15606 * No need to follow this irqs off/on section - it's straight
15607 * and short:
15608 */
15609 ENABLE_INTERRUPTS(CLBR_NONE)
15610- SAVE_ARGS 8,0
15611 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15612 movq %rcx,RIP-ARGOFFSET(%rsp)
15613 CFI_REL_OFFSET rip,RIP-ARGOFFSET
15614- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15615+ GET_THREAD_INFO(%rcx)
15616+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
15617 jnz tracesys
15618 system_call_fastpath:
15619 cmpq $__NR_syscall_max,%rax
15620 ja badsys
15621- movq %r10,%rcx
15622+ movq R10-ARGOFFSET(%rsp),%rcx
15623 call *sys_call_table(,%rax,8) # XXX: rip relative
15624 movq %rax,RAX-ARGOFFSET(%rsp)
15625 /*
15626@@ -498,10 +786,13 @@ sysret_check:
15627 LOCKDEP_SYS_EXIT
15628 DISABLE_INTERRUPTS(CLBR_NONE)
15629 TRACE_IRQS_OFF
15630- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
15631+ GET_THREAD_INFO(%rcx)
15632+ movl TI_flags(%rcx),%edx
15633 andl %edi,%edx
15634 jnz sysret_careful
15635 CFI_REMEMBER_STATE
15636+ pax_exit_kernel_user
15637+ pax_erase_kstack
15638 /*
15639 * sysretq will re-enable interrupts:
15640 */
15641@@ -553,14 +844,18 @@ badsys:
15642 * jump back to the normal fast path.
15643 */
15644 auditsys:
15645- movq %r10,%r9 /* 6th arg: 4th syscall arg */
15646+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
15647 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
15648 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
15649 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
15650 movq %rax,%rsi /* 2nd arg: syscall number */
15651 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
15652 call __audit_syscall_entry
15653+
15654+ pax_erase_kstack
15655+
15656 LOAD_ARGS 0 /* reload call-clobbered registers */
15657+ pax_set_fptr_mask
15658 jmp system_call_fastpath
15659
15660 /*
15661@@ -581,7 +876,7 @@ sysret_audit:
15662 /* Do syscall tracing */
15663 tracesys:
15664 #ifdef CONFIG_AUDITSYSCALL
15665- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15666+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
15667 jz auditsys
15668 #endif
15669 SAVE_REST
15670@@ -589,16 +884,20 @@ tracesys:
15671 FIXUP_TOP_OF_STACK %rdi
15672 movq %rsp,%rdi
15673 call syscall_trace_enter
15674+
15675+ pax_erase_kstack
15676+
15677 /*
15678 * Reload arg registers from stack in case ptrace changed them.
15679 * We don't reload %rax because syscall_trace_enter() returned
15680 * the value it wants us to use in the table lookup.
15681 */
15682 LOAD_ARGS ARGOFFSET, 1
15683+ pax_set_fptr_mask
15684 RESTORE_REST
15685 cmpq $__NR_syscall_max,%rax
15686 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
15687- movq %r10,%rcx /* fixup for C */
15688+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
15689 call *sys_call_table(,%rax,8)
15690 movq %rax,RAX-ARGOFFSET(%rsp)
15691 /* Use IRET because user could have changed frame */
15692@@ -619,6 +918,7 @@ GLOBAL(int_with_check)
15693 andl %edi,%edx
15694 jnz int_careful
15695 andl $~TS_COMPAT,TI_status(%rcx)
15696+ pax_erase_kstack
15697 jmp retint_swapgs
15698
15699 /* Either reschedule or signal or syscall exit tracking needed. */
15700@@ -665,7 +965,7 @@ int_restore_rest:
15701 TRACE_IRQS_OFF
15702 jmp int_with_check
15703 CFI_ENDPROC
15704-END(system_call)
15705+ENDPROC(system_call)
15706
15707 /*
15708 * Certain special system calls that need to save a complete full stack frame.
15709@@ -681,7 +981,7 @@ ENTRY(\label)
15710 call \func
15711 jmp ptregscall_common
15712 CFI_ENDPROC
15713-END(\label)
15714+ENDPROC(\label)
15715 .endm
15716
15717 PTREGSCALL stub_clone, sys_clone, %r8
15718@@ -699,9 +999,10 @@ ENTRY(ptregscall_common)
15719 movq_cfi_restore R12+8, r12
15720 movq_cfi_restore RBP+8, rbp
15721 movq_cfi_restore RBX+8, rbx
15722+ pax_force_retaddr
15723 ret $REST_SKIP /* pop extended registers */
15724 CFI_ENDPROC
15725-END(ptregscall_common)
15726+ENDPROC(ptregscall_common)
15727
15728 ENTRY(stub_execve)
15729 CFI_STARTPROC
15730@@ -716,7 +1017,7 @@ ENTRY(stub_execve)
15731 RESTORE_REST
15732 jmp int_ret_from_sys_call
15733 CFI_ENDPROC
15734-END(stub_execve)
15735+ENDPROC(stub_execve)
15736
15737 /*
15738 * sigreturn is special because it needs to restore all registers on return.
15739@@ -734,7 +1035,7 @@ ENTRY(stub_rt_sigreturn)
15740 RESTORE_REST
15741 jmp int_ret_from_sys_call
15742 CFI_ENDPROC
15743-END(stub_rt_sigreturn)
15744+ENDPROC(stub_rt_sigreturn)
15745
15746 /*
15747 * Build the entry stubs and pointer table with some assembler magic.
15748@@ -769,7 +1070,7 @@ vector=vector+1
15749 2: jmp common_interrupt
15750 .endr
15751 CFI_ENDPROC
15752-END(irq_entries_start)
15753+ENDPROC(irq_entries_start)
15754
15755 .previous
15756 END(interrupt)
15757@@ -789,6 +1090,16 @@ END(interrupt)
15758 subq $ORIG_RAX-RBP, %rsp
15759 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
15760 SAVE_ARGS_IRQ
15761+#ifdef CONFIG_PAX_MEMORY_UDEREF
15762+ testb $3, CS(%rdi)
15763+ jnz 1f
15764+ pax_enter_kernel
15765+ jmp 2f
15766+1: pax_enter_kernel_user
15767+2:
15768+#else
15769+ pax_enter_kernel
15770+#endif
15771 call \func
15772 .endm
15773
15774@@ -820,7 +1131,7 @@ ret_from_intr:
15775
15776 exit_intr:
15777 GET_THREAD_INFO(%rcx)
15778- testl $3,CS-ARGOFFSET(%rsp)
15779+ testb $3,CS-ARGOFFSET(%rsp)
15780 je retint_kernel
15781
15782 /* Interrupt came from user space */
15783@@ -842,12 +1153,15 @@ retint_swapgs: /* return to user-space */
15784 * The iretq could re-enable interrupts:
15785 */
15786 DISABLE_INTERRUPTS(CLBR_ANY)
15787+ pax_exit_kernel_user
15788 TRACE_IRQS_IRETQ
15789 SWAPGS
15790 jmp restore_args
15791
15792 retint_restore_args: /* return to kernel space */
15793 DISABLE_INTERRUPTS(CLBR_ANY)
15794+ pax_exit_kernel
15795+ pax_force_retaddr RIP-ARGOFFSET
15796 /*
15797 * The iretq could re-enable interrupts:
15798 */
15799@@ -936,7 +1250,7 @@ ENTRY(retint_kernel)
15800 #endif
15801
15802 CFI_ENDPROC
15803-END(common_interrupt)
15804+ENDPROC(common_interrupt)
15805 /*
15806 * End of kprobes section
15807 */
15808@@ -953,7 +1267,7 @@ ENTRY(\sym)
15809 interrupt \do_sym
15810 jmp ret_from_intr
15811 CFI_ENDPROC
15812-END(\sym)
15813+ENDPROC(\sym)
15814 .endm
15815
15816 #ifdef CONFIG_SMP
15817@@ -1026,12 +1340,22 @@ ENTRY(\sym)
15818 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15819 call error_entry
15820 DEFAULT_FRAME 0
15821+#ifdef CONFIG_PAX_MEMORY_UDEREF
15822+ testb $3, CS(%rsp)
15823+ jnz 1f
15824+ pax_enter_kernel
15825+ jmp 2f
15826+1: pax_enter_kernel_user
15827+2:
15828+#else
15829+ pax_enter_kernel
15830+#endif
15831 movq %rsp,%rdi /* pt_regs pointer */
15832 xorl %esi,%esi /* no error code */
15833 call \do_sym
15834 jmp error_exit /* %ebx: no swapgs flag */
15835 CFI_ENDPROC
15836-END(\sym)
15837+ENDPROC(\sym)
15838 .endm
15839
15840 .macro paranoidzeroentry sym do_sym
15841@@ -1043,15 +1367,25 @@ ENTRY(\sym)
15842 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15843 call save_paranoid
15844 TRACE_IRQS_OFF
15845+#ifdef CONFIG_PAX_MEMORY_UDEREF
15846+ testb $3, CS(%rsp)
15847+ jnz 1f
15848+ pax_enter_kernel
15849+ jmp 2f
15850+1: pax_enter_kernel_user
15851+2:
15852+#else
15853+ pax_enter_kernel
15854+#endif
15855 movq %rsp,%rdi /* pt_regs pointer */
15856 xorl %esi,%esi /* no error code */
15857 call \do_sym
15858 jmp paranoid_exit /* %ebx: no swapgs flag */
15859 CFI_ENDPROC
15860-END(\sym)
15861+ENDPROC(\sym)
15862 .endm
15863
15864-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
15865+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
15866 .macro paranoidzeroentry_ist sym do_sym ist
15867 ENTRY(\sym)
15868 INTR_FRAME
15869@@ -1061,14 +1395,30 @@ ENTRY(\sym)
15870 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15871 call save_paranoid
15872 TRACE_IRQS_OFF
15873+#ifdef CONFIG_PAX_MEMORY_UDEREF
15874+ testb $3, CS(%rsp)
15875+ jnz 1f
15876+ pax_enter_kernel
15877+ jmp 2f
15878+1: pax_enter_kernel_user
15879+2:
15880+#else
15881+ pax_enter_kernel
15882+#endif
15883 movq %rsp,%rdi /* pt_regs pointer */
15884 xorl %esi,%esi /* no error code */
15885+#ifdef CONFIG_SMP
15886+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
15887+ lea init_tss(%r12), %r12
15888+#else
15889+ lea init_tss(%rip), %r12
15890+#endif
15891 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
15892 call \do_sym
15893 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
15894 jmp paranoid_exit /* %ebx: no swapgs flag */
15895 CFI_ENDPROC
15896-END(\sym)
15897+ENDPROC(\sym)
15898 .endm
15899
15900 .macro errorentry sym do_sym
15901@@ -1079,13 +1429,23 @@ ENTRY(\sym)
15902 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
15903 call error_entry
15904 DEFAULT_FRAME 0
15905+#ifdef CONFIG_PAX_MEMORY_UDEREF
15906+ testb $3, CS(%rsp)
15907+ jnz 1f
15908+ pax_enter_kernel
15909+ jmp 2f
15910+1: pax_enter_kernel_user
15911+2:
15912+#else
15913+ pax_enter_kernel
15914+#endif
15915 movq %rsp,%rdi /* pt_regs pointer */
15916 movq ORIG_RAX(%rsp),%rsi /* get error code */
15917 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
15918 call \do_sym
15919 jmp error_exit /* %ebx: no swapgs flag */
15920 CFI_ENDPROC
15921-END(\sym)
15922+ENDPROC(\sym)
15923 .endm
15924
15925 /* error code is on the stack already */
15926@@ -1098,13 +1458,23 @@ ENTRY(\sym)
15927 call save_paranoid
15928 DEFAULT_FRAME 0
15929 TRACE_IRQS_OFF
15930+#ifdef CONFIG_PAX_MEMORY_UDEREF
15931+ testb $3, CS(%rsp)
15932+ jnz 1f
15933+ pax_enter_kernel
15934+ jmp 2f
15935+1: pax_enter_kernel_user
15936+2:
15937+#else
15938+ pax_enter_kernel
15939+#endif
15940 movq %rsp,%rdi /* pt_regs pointer */
15941 movq ORIG_RAX(%rsp),%rsi /* get error code */
15942 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
15943 call \do_sym
15944 jmp paranoid_exit /* %ebx: no swapgs flag */
15945 CFI_ENDPROC
15946-END(\sym)
15947+ENDPROC(\sym)
15948 .endm
15949
15950 zeroentry divide_error do_divide_error
15951@@ -1134,9 +1504,10 @@ gs_change:
15952 2: mfence /* workaround */
15953 SWAPGS
15954 popfq_cfi
15955+ pax_force_retaddr
15956 ret
15957 CFI_ENDPROC
15958-END(native_load_gs_index)
15959+ENDPROC(native_load_gs_index)
15960
15961 .section __ex_table,"a"
15962 .align 8
15963@@ -1158,13 +1529,14 @@ ENTRY(kernel_thread_helper)
15964 * Here we are in the child and the registers are set as they were
15965 * at kernel_thread() invocation in the parent.
15966 */
15967+ pax_force_fptr %rsi
15968 call *%rsi
15969 # exit
15970 mov %eax, %edi
15971 call do_exit
15972 ud2 # padding for call trace
15973 CFI_ENDPROC
15974-END(kernel_thread_helper)
15975+ENDPROC(kernel_thread_helper)
15976
15977 /*
15978 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
15979@@ -1191,11 +1563,11 @@ ENTRY(kernel_execve)
15980 RESTORE_REST
15981 testq %rax,%rax
15982 je int_ret_from_sys_call
15983- RESTORE_ARGS
15984 UNFAKE_STACK_FRAME
15985+ pax_force_retaddr
15986 ret
15987 CFI_ENDPROC
15988-END(kernel_execve)
15989+ENDPROC(kernel_execve)
15990
15991 /* Call softirq on interrupt stack. Interrupts are off. */
15992 ENTRY(call_softirq)
15993@@ -1213,9 +1585,10 @@ ENTRY(call_softirq)
15994 CFI_DEF_CFA_REGISTER rsp
15995 CFI_ADJUST_CFA_OFFSET -8
15996 decl PER_CPU_VAR(irq_count)
15997+ pax_force_retaddr
15998 ret
15999 CFI_ENDPROC
16000-END(call_softirq)
16001+ENDPROC(call_softirq)
16002
16003 #ifdef CONFIG_XEN
16004 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16005@@ -1253,7 +1626,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16006 decl PER_CPU_VAR(irq_count)
16007 jmp error_exit
16008 CFI_ENDPROC
16009-END(xen_do_hypervisor_callback)
16010+ENDPROC(xen_do_hypervisor_callback)
16011
16012 /*
16013 * Hypervisor uses this for application faults while it executes.
16014@@ -1312,7 +1685,7 @@ ENTRY(xen_failsafe_callback)
16015 SAVE_ALL
16016 jmp error_exit
16017 CFI_ENDPROC
16018-END(xen_failsafe_callback)
16019+ENDPROC(xen_failsafe_callback)
16020
16021 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16022 xen_hvm_callback_vector xen_evtchn_do_upcall
16023@@ -1361,16 +1734,31 @@ ENTRY(paranoid_exit)
16024 TRACE_IRQS_OFF
16025 testl %ebx,%ebx /* swapgs needed? */
16026 jnz paranoid_restore
16027- testl $3,CS(%rsp)
16028+ testb $3,CS(%rsp)
16029 jnz paranoid_userspace
16030+#ifdef CONFIG_PAX_MEMORY_UDEREF
16031+ pax_exit_kernel
16032+ TRACE_IRQS_IRETQ 0
16033+ SWAPGS_UNSAFE_STACK
16034+ RESTORE_ALL 8
16035+ pax_force_retaddr_bts
16036+ jmp irq_return
16037+#endif
16038 paranoid_swapgs:
16039+#ifdef CONFIG_PAX_MEMORY_UDEREF
16040+ pax_exit_kernel_user
16041+#else
16042+ pax_exit_kernel
16043+#endif
16044 TRACE_IRQS_IRETQ 0
16045 SWAPGS_UNSAFE_STACK
16046 RESTORE_ALL 8
16047 jmp irq_return
16048 paranoid_restore:
16049+ pax_exit_kernel
16050 TRACE_IRQS_IRETQ 0
16051 RESTORE_ALL 8
16052+ pax_force_retaddr_bts
16053 jmp irq_return
16054 paranoid_userspace:
16055 GET_THREAD_INFO(%rcx)
16056@@ -1399,7 +1787,7 @@ paranoid_schedule:
16057 TRACE_IRQS_OFF
16058 jmp paranoid_userspace
16059 CFI_ENDPROC
16060-END(paranoid_exit)
16061+ENDPROC(paranoid_exit)
16062
16063 /*
16064 * Exception entry point. This expects an error code/orig_rax on the stack.
16065@@ -1426,12 +1814,13 @@ ENTRY(error_entry)
16066 movq_cfi r14, R14+8
16067 movq_cfi r15, R15+8
16068 xorl %ebx,%ebx
16069- testl $3,CS+8(%rsp)
16070+ testb $3,CS+8(%rsp)
16071 je error_kernelspace
16072 error_swapgs:
16073 SWAPGS
16074 error_sti:
16075 TRACE_IRQS_OFF
16076+ pax_force_retaddr_bts
16077 ret
16078
16079 /*
16080@@ -1458,7 +1847,7 @@ bstep_iret:
16081 movq %rcx,RIP+8(%rsp)
16082 jmp error_swapgs
16083 CFI_ENDPROC
16084-END(error_entry)
16085+ENDPROC(error_entry)
16086
16087
16088 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16089@@ -1478,7 +1867,7 @@ ENTRY(error_exit)
16090 jnz retint_careful
16091 jmp retint_swapgs
16092 CFI_ENDPROC
16093-END(error_exit)
16094+ENDPROC(error_exit)
16095
16096 /*
16097 * Test if a given stack is an NMI stack or not.
16098@@ -1535,9 +1924,11 @@ ENTRY(nmi)
16099 * If %cs was not the kernel segment, then the NMI triggered in user
16100 * space, which means it is definitely not nested.
16101 */
16102+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16103+ je 1f
16104 cmpl $__KERNEL_CS, 16(%rsp)
16105 jne first_nmi
16106-
16107+1:
16108 /*
16109 * Check the special variable on the stack to see if NMIs are
16110 * executing.
16111@@ -1659,6 +2050,16 @@ restart_nmi:
16112 */
16113 call save_paranoid
16114 DEFAULT_FRAME 0
16115+#ifdef CONFIG_PAX_MEMORY_UDEREF
16116+ testb $3, CS(%rsp)
16117+ jnz 1f
16118+ pax_enter_kernel
16119+ jmp 2f
16120+1: pax_enter_kernel_user
16121+2:
16122+#else
16123+ pax_enter_kernel
16124+#endif
16125 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16126 movq %rsp,%rdi
16127 movq $-1,%rsi
16128@@ -1666,14 +2067,25 @@ restart_nmi:
16129 testl %ebx,%ebx /* swapgs needed? */
16130 jnz nmi_restore
16131 nmi_swapgs:
16132+#ifdef CONFIG_PAX_MEMORY_UDEREF
16133+ pax_exit_kernel_user
16134+#else
16135+ pax_exit_kernel
16136+#endif
16137 SWAPGS_UNSAFE_STACK
16138+ RESTORE_ALL 8
16139+ /* Clear the NMI executing stack variable */
16140+ movq $0, 10*8(%rsp)
16141+ jmp irq_return
16142 nmi_restore:
16143+ pax_exit_kernel
16144 RESTORE_ALL 8
16145+ pax_force_retaddr_bts
16146 /* Clear the NMI executing stack variable */
16147 movq $0, 10*8(%rsp)
16148 jmp irq_return
16149 CFI_ENDPROC
16150-END(nmi)
16151+ENDPROC(nmi)
16152
16153 /*
16154 * If an NMI hit an iret because of an exception or breakpoint,
16155@@ -1700,7 +2112,7 @@ ENTRY(ignore_sysret)
16156 mov $-ENOSYS,%eax
16157 sysret
16158 CFI_ENDPROC
16159-END(ignore_sysret)
16160+ENDPROC(ignore_sysret)
16161
16162 /*
16163 * End of kprobes section
16164diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16165index c9a281f..ce2f317 100644
16166--- a/arch/x86/kernel/ftrace.c
16167+++ b/arch/x86/kernel/ftrace.c
16168@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16169 static const void *mod_code_newcode; /* holds the text to write to the IP */
16170
16171 static unsigned nmi_wait_count;
16172-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16173+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16174
16175 int ftrace_arch_read_dyn_info(char *buf, int size)
16176 {
16177@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16178
16179 r = snprintf(buf, size, "%u %u",
16180 nmi_wait_count,
16181- atomic_read(&nmi_update_count));
16182+ atomic_read_unchecked(&nmi_update_count));
16183 return r;
16184 }
16185
16186@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16187
16188 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16189 smp_rmb();
16190+ pax_open_kernel();
16191 ftrace_mod_code();
16192- atomic_inc(&nmi_update_count);
16193+ pax_close_kernel();
16194+ atomic_inc_unchecked(&nmi_update_count);
16195 }
16196 /* Must have previous changes seen before executions */
16197 smp_mb();
16198@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16199 {
16200 unsigned char replaced[MCOUNT_INSN_SIZE];
16201
16202+ ip = ktla_ktva(ip);
16203+
16204 /*
16205 * Note: Due to modules and __init, code can
16206 * disappear and change, we need to protect against faulting
16207@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16208 unsigned char old[MCOUNT_INSN_SIZE], *new;
16209 int ret;
16210
16211- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16212+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16213 new = ftrace_call_replace(ip, (unsigned long)func);
16214 ret = ftrace_modify_code(ip, old, new);
16215
16216@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16217 {
16218 unsigned char code[MCOUNT_INSN_SIZE];
16219
16220+ ip = ktla_ktva(ip);
16221+
16222 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16223 return -EFAULT;
16224
16225diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16226index 51ff186..9e77418 100644
16227--- a/arch/x86/kernel/head32.c
16228+++ b/arch/x86/kernel/head32.c
16229@@ -19,6 +19,7 @@
16230 #include <asm/io_apic.h>
16231 #include <asm/bios_ebda.h>
16232 #include <asm/tlbflush.h>
16233+#include <asm/boot.h>
16234
16235 static void __init i386_default_early_setup(void)
16236 {
16237@@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16238
16239 void __init i386_start_kernel(void)
16240 {
16241- memblock_reserve(__pa_symbol(&_text),
16242- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16243+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16244
16245 #ifdef CONFIG_BLK_DEV_INITRD
16246 /* Reserve INITRD */
16247diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16248index ce0be7c..c41476e 100644
16249--- a/arch/x86/kernel/head_32.S
16250+++ b/arch/x86/kernel/head_32.S
16251@@ -25,6 +25,12 @@
16252 /* Physical address */
16253 #define pa(X) ((X) - __PAGE_OFFSET)
16254
16255+#ifdef CONFIG_PAX_KERNEXEC
16256+#define ta(X) (X)
16257+#else
16258+#define ta(X) ((X) - __PAGE_OFFSET)
16259+#endif
16260+
16261 /*
16262 * References to members of the new_cpu_data structure.
16263 */
16264@@ -54,11 +60,7 @@
16265 * and small than max_low_pfn, otherwise will waste some page table entries
16266 */
16267
16268-#if PTRS_PER_PMD > 1
16269-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16270-#else
16271-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16272-#endif
16273+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16274
16275 /* Number of possible pages in the lowmem region */
16276 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16277@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16278 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16279
16280 /*
16281+ * Real beginning of normal "text" segment
16282+ */
16283+ENTRY(stext)
16284+ENTRY(_stext)
16285+
16286+/*
16287 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16288 * %esi points to the real-mode code as a 32-bit pointer.
16289 * CS and DS must be 4 GB flat segments, but we don't depend on
16290@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16291 * can.
16292 */
16293 __HEAD
16294+
16295+#ifdef CONFIG_PAX_KERNEXEC
16296+ jmp startup_32
16297+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16298+.fill PAGE_SIZE-5,1,0xcc
16299+#endif
16300+
16301 ENTRY(startup_32)
16302 movl pa(stack_start),%ecx
16303
16304@@ -105,6 +120,57 @@ ENTRY(startup_32)
16305 2:
16306 leal -__PAGE_OFFSET(%ecx),%esp
16307
16308+#ifdef CONFIG_SMP
16309+ movl $pa(cpu_gdt_table),%edi
16310+ movl $__per_cpu_load,%eax
16311+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16312+ rorl $16,%eax
16313+ movb %al,__KERNEL_PERCPU + 4(%edi)
16314+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16315+ movl $__per_cpu_end - 1,%eax
16316+ subl $__per_cpu_start,%eax
16317+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16318+#endif
16319+
16320+#ifdef CONFIG_PAX_MEMORY_UDEREF
16321+ movl $NR_CPUS,%ecx
16322+ movl $pa(cpu_gdt_table),%edi
16323+1:
16324+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16325+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16326+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16327+ addl $PAGE_SIZE_asm,%edi
16328+ loop 1b
16329+#endif
16330+
16331+#ifdef CONFIG_PAX_KERNEXEC
16332+ movl $pa(boot_gdt),%edi
16333+ movl $__LOAD_PHYSICAL_ADDR,%eax
16334+ movw %ax,__BOOT_CS + 2(%edi)
16335+ rorl $16,%eax
16336+ movb %al,__BOOT_CS + 4(%edi)
16337+ movb %ah,__BOOT_CS + 7(%edi)
16338+ rorl $16,%eax
16339+
16340+ ljmp $(__BOOT_CS),$1f
16341+1:
16342+
16343+ movl $NR_CPUS,%ecx
16344+ movl $pa(cpu_gdt_table),%edi
16345+ addl $__PAGE_OFFSET,%eax
16346+1:
16347+ movw %ax,__KERNEL_CS + 2(%edi)
16348+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16349+ rorl $16,%eax
16350+ movb %al,__KERNEL_CS + 4(%edi)
16351+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16352+ movb %ah,__KERNEL_CS + 7(%edi)
16353+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16354+ rorl $16,%eax
16355+ addl $PAGE_SIZE_asm,%edi
16356+ loop 1b
16357+#endif
16358+
16359 /*
16360 * Clear BSS first so that there are no surprises...
16361 */
16362@@ -195,8 +261,11 @@ ENTRY(startup_32)
16363 movl %eax, pa(max_pfn_mapped)
16364
16365 /* Do early initialization of the fixmap area */
16366- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16367- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16368+#ifdef CONFIG_COMPAT_VDSO
16369+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16370+#else
16371+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16372+#endif
16373 #else /* Not PAE */
16374
16375 page_pde_offset = (__PAGE_OFFSET >> 20);
16376@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16377 movl %eax, pa(max_pfn_mapped)
16378
16379 /* Do early initialization of the fixmap area */
16380- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16381- movl %eax,pa(initial_page_table+0xffc)
16382+#ifdef CONFIG_COMPAT_VDSO
16383+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16384+#else
16385+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16386+#endif
16387 #endif
16388
16389 #ifdef CONFIG_PARAVIRT
16390@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16391 cmpl $num_subarch_entries, %eax
16392 jae bad_subarch
16393
16394- movl pa(subarch_entries)(,%eax,4), %eax
16395- subl $__PAGE_OFFSET, %eax
16396- jmp *%eax
16397+ jmp *pa(subarch_entries)(,%eax,4)
16398
16399 bad_subarch:
16400 WEAK(lguest_entry)
16401@@ -255,10 +325,10 @@ WEAK(xen_entry)
16402 __INITDATA
16403
16404 subarch_entries:
16405- .long default_entry /* normal x86/PC */
16406- .long lguest_entry /* lguest hypervisor */
16407- .long xen_entry /* Xen hypervisor */
16408- .long default_entry /* Moorestown MID */
16409+ .long ta(default_entry) /* normal x86/PC */
16410+ .long ta(lguest_entry) /* lguest hypervisor */
16411+ .long ta(xen_entry) /* Xen hypervisor */
16412+ .long ta(default_entry) /* Moorestown MID */
16413 num_subarch_entries = (. - subarch_entries) / 4
16414 .previous
16415 #else
16416@@ -312,6 +382,7 @@ default_entry:
16417 orl %edx,%eax
16418 movl %eax,%cr4
16419
16420+#ifdef CONFIG_X86_PAE
16421 testb $X86_CR4_PAE, %al # check if PAE is enabled
16422 jz 6f
16423
16424@@ -340,6 +411,9 @@ default_entry:
16425 /* Make changes effective */
16426 wrmsr
16427
16428+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16429+#endif
16430+
16431 6:
16432
16433 /*
16434@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16435 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16436 movl %eax,%ss # after changing gdt.
16437
16438- movl $(__USER_DS),%eax # DS/ES contains default USER segment
16439+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16440 movl %eax,%ds
16441 movl %eax,%es
16442
16443@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16444 */
16445 cmpb $0,ready
16446 jne 1f
16447- movl $gdt_page,%eax
16448+ movl $cpu_gdt_table,%eax
16449 movl $stack_canary,%ecx
16450+#ifdef CONFIG_SMP
16451+ addl $__per_cpu_load,%ecx
16452+#endif
16453 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16454 shrl $16, %ecx
16455 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16456 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16457 1:
16458-#endif
16459 movl $(__KERNEL_STACK_CANARY),%eax
16460+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16461+ movl $(__USER_DS),%eax
16462+#else
16463+ xorl %eax,%eax
16464+#endif
16465 movl %eax,%gs
16466
16467 xorl %eax,%eax # Clear LDT
16468@@ -558,22 +639,22 @@ early_page_fault:
16469 jmp early_fault
16470
16471 early_fault:
16472- cld
16473 #ifdef CONFIG_PRINTK
16474+ cmpl $1,%ss:early_recursion_flag
16475+ je hlt_loop
16476+ incl %ss:early_recursion_flag
16477+ cld
16478 pusha
16479 movl $(__KERNEL_DS),%eax
16480 movl %eax,%ds
16481 movl %eax,%es
16482- cmpl $2,early_recursion_flag
16483- je hlt_loop
16484- incl early_recursion_flag
16485 movl %cr2,%eax
16486 pushl %eax
16487 pushl %edx /* trapno */
16488 pushl $fault_msg
16489 call printk
16490+; call dump_stack
16491 #endif
16492- call dump_stack
16493 hlt_loop:
16494 hlt
16495 jmp hlt_loop
16496@@ -581,8 +662,11 @@ hlt_loop:
16497 /* This is the default interrupt "handler" :-) */
16498 ALIGN
16499 ignore_int:
16500- cld
16501 #ifdef CONFIG_PRINTK
16502+ cmpl $2,%ss:early_recursion_flag
16503+ je hlt_loop
16504+ incl %ss:early_recursion_flag
16505+ cld
16506 pushl %eax
16507 pushl %ecx
16508 pushl %edx
16509@@ -591,9 +675,6 @@ ignore_int:
16510 movl $(__KERNEL_DS),%eax
16511 movl %eax,%ds
16512 movl %eax,%es
16513- cmpl $2,early_recursion_flag
16514- je hlt_loop
16515- incl early_recursion_flag
16516 pushl 16(%esp)
16517 pushl 24(%esp)
16518 pushl 32(%esp)
16519@@ -622,29 +703,43 @@ ENTRY(initial_code)
16520 /*
16521 * BSS section
16522 */
16523-__PAGE_ALIGNED_BSS
16524- .align PAGE_SIZE
16525 #ifdef CONFIG_X86_PAE
16526+.section .initial_pg_pmd,"a",@progbits
16527 initial_pg_pmd:
16528 .fill 1024*KPMDS,4,0
16529 #else
16530+.section .initial_page_table,"a",@progbits
16531 ENTRY(initial_page_table)
16532 .fill 1024,4,0
16533 #endif
16534+.section .initial_pg_fixmap,"a",@progbits
16535 initial_pg_fixmap:
16536 .fill 1024,4,0
16537+.section .empty_zero_page,"a",@progbits
16538 ENTRY(empty_zero_page)
16539 .fill 4096,1,0
16540+.section .swapper_pg_dir,"a",@progbits
16541 ENTRY(swapper_pg_dir)
16542+#ifdef CONFIG_X86_PAE
16543+ .fill 4,8,0
16544+#else
16545 .fill 1024,4,0
16546+#endif
16547+
16548+/*
16549+ * The IDT has to be page-aligned to simplify the Pentium
16550+ * F0 0F bug workaround.. We have a special link segment
16551+ * for this.
16552+ */
16553+.section .idt,"a",@progbits
16554+ENTRY(idt_table)
16555+ .fill 256,8,0
16556
16557 /*
16558 * This starts the data section.
16559 */
16560 #ifdef CONFIG_X86_PAE
16561-__PAGE_ALIGNED_DATA
16562- /* Page-aligned for the benefit of paravirt? */
16563- .align PAGE_SIZE
16564+.section .initial_page_table,"a",@progbits
16565 ENTRY(initial_page_table)
16566 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
16567 # if KPMDS == 3
16568@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
16569 # error "Kernel PMDs should be 1, 2 or 3"
16570 # endif
16571 .align PAGE_SIZE /* needs to be page-sized too */
16572+
16573+#ifdef CONFIG_PAX_PER_CPU_PGD
16574+ENTRY(cpu_pgd)
16575+ .rept NR_CPUS
16576+ .fill 4,8,0
16577+ .endr
16578+#endif
16579+
16580 #endif
16581
16582 .data
16583 .balign 4
16584 ENTRY(stack_start)
16585- .long init_thread_union+THREAD_SIZE
16586+ .long init_thread_union+THREAD_SIZE-8
16587
16588+ready: .byte 0
16589+
16590+.section .rodata,"a",@progbits
16591 early_recursion_flag:
16592 .long 0
16593
16594-ready: .byte 0
16595-
16596 int_msg:
16597 .asciz "Unknown interrupt or fault at: %p %p %p\n"
16598
16599@@ -707,7 +811,7 @@ fault_msg:
16600 .word 0 # 32 bit align gdt_desc.address
16601 boot_gdt_descr:
16602 .word __BOOT_DS+7
16603- .long boot_gdt - __PAGE_OFFSET
16604+ .long pa(boot_gdt)
16605
16606 .word 0 # 32-bit align idt_desc.address
16607 idt_descr:
16608@@ -718,7 +822,7 @@ idt_descr:
16609 .word 0 # 32 bit align gdt_desc.address
16610 ENTRY(early_gdt_descr)
16611 .word GDT_ENTRIES*8-1
16612- .long gdt_page /* Overwritten for secondary CPUs */
16613+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
16614
16615 /*
16616 * The boot_gdt must mirror the equivalent in setup.S and is
16617@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
16618 .align L1_CACHE_BYTES
16619 ENTRY(boot_gdt)
16620 .fill GDT_ENTRY_BOOT_CS,8,0
16621- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
16622- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
16623+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
16624+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
16625+
16626+ .align PAGE_SIZE_asm
16627+ENTRY(cpu_gdt_table)
16628+ .rept NR_CPUS
16629+ .quad 0x0000000000000000 /* NULL descriptor */
16630+ .quad 0x0000000000000000 /* 0x0b reserved */
16631+ .quad 0x0000000000000000 /* 0x13 reserved */
16632+ .quad 0x0000000000000000 /* 0x1b reserved */
16633+
16634+#ifdef CONFIG_PAX_KERNEXEC
16635+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
16636+#else
16637+ .quad 0x0000000000000000 /* 0x20 unused */
16638+#endif
16639+
16640+ .quad 0x0000000000000000 /* 0x28 unused */
16641+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
16642+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
16643+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
16644+ .quad 0x0000000000000000 /* 0x4b reserved */
16645+ .quad 0x0000000000000000 /* 0x53 reserved */
16646+ .quad 0x0000000000000000 /* 0x5b reserved */
16647+
16648+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
16649+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
16650+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
16651+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
16652+
16653+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
16654+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
16655+
16656+ /*
16657+ * Segments used for calling PnP BIOS have byte granularity.
16658+ * The code segments and data segments have fixed 64k limits,
16659+ * the transfer segment sizes are set at run time.
16660+ */
16661+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
16662+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
16663+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
16664+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
16665+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
16666+
16667+ /*
16668+ * The APM segments have byte granularity and their bases
16669+ * are set at run time. All have 64k limits.
16670+ */
16671+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
16672+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
16673+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
16674+
16675+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
16676+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
16677+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
16678+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
16679+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
16680+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
16681+
16682+ /* Be sure this is zeroed to avoid false validations in Xen */
16683+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
16684+ .endr
16685diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
16686index 40f4eb3..6d24d9d 100644
16687--- a/arch/x86/kernel/head_64.S
16688+++ b/arch/x86/kernel/head_64.S
16689@@ -19,6 +19,8 @@
16690 #include <asm/cache.h>
16691 #include <asm/processor-flags.h>
16692 #include <asm/percpu.h>
16693+#include <asm/cpufeature.h>
16694+#include <asm/alternative-asm.h>
16695
16696 #ifdef CONFIG_PARAVIRT
16697 #include <asm/asm-offsets.h>
16698@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
16699 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
16700 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
16701 L3_START_KERNEL = pud_index(__START_KERNEL_map)
16702+L4_VMALLOC_START = pgd_index(VMALLOC_START)
16703+L3_VMALLOC_START = pud_index(VMALLOC_START)
16704+L4_VMALLOC_END = pgd_index(VMALLOC_END)
16705+L3_VMALLOC_END = pud_index(VMALLOC_END)
16706+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
16707+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
16708
16709 .text
16710 __HEAD
16711@@ -85,35 +93,23 @@ startup_64:
16712 */
16713 addq %rbp, init_level4_pgt + 0(%rip)
16714 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
16715+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
16716+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
16717+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
16718 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
16719
16720 addq %rbp, level3_ident_pgt + 0(%rip)
16721+#ifndef CONFIG_XEN
16722+ addq %rbp, level3_ident_pgt + 8(%rip)
16723+#endif
16724
16725- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
16726- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
16727+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
16728+
16729+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
16730+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
16731
16732 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
16733-
16734- /* Add an Identity mapping if I am above 1G */
16735- leaq _text(%rip), %rdi
16736- andq $PMD_PAGE_MASK, %rdi
16737-
16738- movq %rdi, %rax
16739- shrq $PUD_SHIFT, %rax
16740- andq $(PTRS_PER_PUD - 1), %rax
16741- jz ident_complete
16742-
16743- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
16744- leaq level3_ident_pgt(%rip), %rbx
16745- movq %rdx, 0(%rbx, %rax, 8)
16746-
16747- movq %rdi, %rax
16748- shrq $PMD_SHIFT, %rax
16749- andq $(PTRS_PER_PMD - 1), %rax
16750- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
16751- leaq level2_spare_pgt(%rip), %rbx
16752- movq %rdx, 0(%rbx, %rax, 8)
16753-ident_complete:
16754+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
16755
16756 /*
16757 * Fixup the kernel text+data virtual addresses. Note that
16758@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
16759 * after the boot processor executes this code.
16760 */
16761
16762- /* Enable PAE mode and PGE */
16763- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
16764+ /* Enable PAE mode and PSE/PGE */
16765+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16766 movq %rax, %cr4
16767
16768 /* Setup early boot stage 4 level pagetables. */
16769@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
16770 movl $MSR_EFER, %ecx
16771 rdmsr
16772 btsl $_EFER_SCE, %eax /* Enable System Call */
16773- btl $20,%edi /* No Execute supported? */
16774+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
16775 jnc 1f
16776 btsl $_EFER_NX, %eax
16777+ leaq init_level4_pgt(%rip), %rdi
16778+#ifndef CONFIG_EFI
16779+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
16780+#endif
16781+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
16782+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
16783+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
16784+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
16785 1: wrmsr /* Make changes effective */
16786
16787 /* Setup cr0 */
16788@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
16789 * jump. In addition we need to ensure %cs is set so we make this
16790 * a far return.
16791 */
16792+ pax_set_fptr_mask
16793 movq initial_code(%rip),%rax
16794 pushq $0 # fake return address to stop unwinder
16795 pushq $__KERNEL_CS # set correct cs
16796@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
16797 bad_address:
16798 jmp bad_address
16799
16800- .section ".init.text","ax"
16801+ __INIT
16802 #ifdef CONFIG_EARLY_PRINTK
16803 .globl early_idt_handlers
16804 early_idt_handlers:
16805@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
16806 #endif /* EARLY_PRINTK */
16807 1: hlt
16808 jmp 1b
16809+ .previous
16810
16811 #ifdef CONFIG_EARLY_PRINTK
16812+ __INITDATA
16813 early_recursion_flag:
16814 .long 0
16815+ .previous
16816
16817+ .section .rodata,"a",@progbits
16818 early_idt_msg:
16819 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
16820 early_idt_ripmsg:
16821 .asciz "RIP %s\n"
16822+ .previous
16823 #endif /* CONFIG_EARLY_PRINTK */
16824- .previous
16825
16826+ .section .rodata,"a",@progbits
16827 #define NEXT_PAGE(name) \
16828 .balign PAGE_SIZE; \
16829 ENTRY(name)
16830@@ -338,7 +348,6 @@ ENTRY(name)
16831 i = i + 1 ; \
16832 .endr
16833
16834- .data
16835 /*
16836 * This default setting generates an ident mapping at address 0x100000
16837 * and a mapping for the kernel that precisely maps virtual address
16838@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
16839 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16840 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
16841 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16842+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
16843+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
16844+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
16845+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
16846+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
16847+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
16848 .org init_level4_pgt + L4_START_KERNEL*8, 0
16849 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
16850 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
16851
16852+#ifdef CONFIG_PAX_PER_CPU_PGD
16853+NEXT_PAGE(cpu_pgd)
16854+ .rept NR_CPUS
16855+ .fill 512,8,0
16856+ .endr
16857+#endif
16858+
16859 NEXT_PAGE(level3_ident_pgt)
16860 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
16861+#ifdef CONFIG_XEN
16862 .fill 511,8,0
16863+#else
16864+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
16865+ .fill 510,8,0
16866+#endif
16867+
16868+NEXT_PAGE(level3_vmalloc_start_pgt)
16869+ .fill 512,8,0
16870+
16871+NEXT_PAGE(level3_vmalloc_end_pgt)
16872+ .fill 512,8,0
16873+
16874+NEXT_PAGE(level3_vmemmap_pgt)
16875+ .fill L3_VMEMMAP_START,8,0
16876+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
16877
16878 NEXT_PAGE(level3_kernel_pgt)
16879 .fill L3_START_KERNEL,8,0
16880@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
16881 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
16882 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
16883
16884+NEXT_PAGE(level2_vmemmap_pgt)
16885+ .fill 512,8,0
16886+
16887 NEXT_PAGE(level2_fixmap_pgt)
16888- .fill 506,8,0
16889- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
16890- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
16891- .fill 5,8,0
16892+ .fill 507,8,0
16893+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
16894+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
16895+ .fill 4,8,0
16896
16897-NEXT_PAGE(level1_fixmap_pgt)
16898+NEXT_PAGE(level1_vsyscall_pgt)
16899 .fill 512,8,0
16900
16901-NEXT_PAGE(level2_ident_pgt)
16902- /* Since I easily can, map the first 1G.
16903+ /* Since I easily can, map the first 2G.
16904 * Don't set NX because code runs from these pages.
16905 */
16906- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
16907+NEXT_PAGE(level2_ident_pgt)
16908+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
16909
16910 NEXT_PAGE(level2_kernel_pgt)
16911 /*
16912@@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
16913 * If you want to increase this then increase MODULES_VADDR
16914 * too.)
16915 */
16916- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
16917- KERNEL_IMAGE_SIZE/PMD_SIZE)
16918-
16919-NEXT_PAGE(level2_spare_pgt)
16920- .fill 512, 8, 0
16921+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
16922
16923 #undef PMDS
16924 #undef NEXT_PAGE
16925
16926- .data
16927+ .align PAGE_SIZE
16928+ENTRY(cpu_gdt_table)
16929+ .rept NR_CPUS
16930+ .quad 0x0000000000000000 /* NULL descriptor */
16931+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
16932+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
16933+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
16934+ .quad 0x00cffb000000ffff /* __USER32_CS */
16935+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
16936+ .quad 0x00affb000000ffff /* __USER_CS */
16937+
16938+#ifdef CONFIG_PAX_KERNEXEC
16939+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
16940+#else
16941+ .quad 0x0 /* unused */
16942+#endif
16943+
16944+ .quad 0,0 /* TSS */
16945+ .quad 0,0 /* LDT */
16946+ .quad 0,0,0 /* three TLS descriptors */
16947+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
16948+ /* asm/segment.h:GDT_ENTRIES must match this */
16949+
16950+ /* zero the remaining page */
16951+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
16952+ .endr
16953+
16954 .align 16
16955 .globl early_gdt_descr
16956 early_gdt_descr:
16957 .word GDT_ENTRIES*8-1
16958 early_gdt_descr_base:
16959- .quad INIT_PER_CPU_VAR(gdt_page)
16960+ .quad cpu_gdt_table
16961
16962 ENTRY(phys_base)
16963 /* This must match the first entry in level2_kernel_pgt */
16964 .quad 0x0000000000000000
16965
16966 #include "../../x86/xen/xen-head.S"
16967-
16968- .section .bss, "aw", @nobits
16969+
16970+ .section .rodata,"a",@progbits
16971 .align L1_CACHE_BYTES
16972 ENTRY(idt_table)
16973- .skip IDT_ENTRIES * 16
16974+ .fill 512,8,0
16975
16976 .align L1_CACHE_BYTES
16977 ENTRY(nmi_idt_table)
16978- .skip IDT_ENTRIES * 16
16979+ .fill 512,8,0
16980
16981 __PAGE_ALIGNED_BSS
16982 .align PAGE_SIZE
16983diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
16984index 9c3bd4a..e1d9b35 100644
16985--- a/arch/x86/kernel/i386_ksyms_32.c
16986+++ b/arch/x86/kernel/i386_ksyms_32.c
16987@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
16988 EXPORT_SYMBOL(cmpxchg8b_emu);
16989 #endif
16990
16991+EXPORT_SYMBOL_GPL(cpu_gdt_table);
16992+
16993 /* Networking helper routines. */
16994 EXPORT_SYMBOL(csum_partial_copy_generic);
16995+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
16996+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
16997
16998 EXPORT_SYMBOL(__get_user_1);
16999 EXPORT_SYMBOL(__get_user_2);
17000@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17001
17002 EXPORT_SYMBOL(csum_partial);
17003 EXPORT_SYMBOL(empty_zero_page);
17004+
17005+#ifdef CONFIG_PAX_KERNEXEC
17006+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17007+#endif
17008diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17009index 739d859..d1d6be7 100644
17010--- a/arch/x86/kernel/i387.c
17011+++ b/arch/x86/kernel/i387.c
17012@@ -188,6 +188,9 @@ int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
17013
17014 int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17015 unsigned int pos, unsigned int count,
17016+ void *kbuf, void __user *ubuf) __size_overflow(4);
17017+int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17018+ unsigned int pos, unsigned int count,
17019 void *kbuf, void __user *ubuf)
17020 {
17021 int ret;
17022@@ -207,6 +210,9 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17023
17024 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17025 unsigned int pos, unsigned int count,
17026+ const void *kbuf, const void __user *ubuf) __size_overflow(4);
17027+int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17028+ unsigned int pos, unsigned int count,
17029 const void *kbuf, const void __user *ubuf)
17030 {
17031 int ret;
17032@@ -240,6 +246,9 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17033
17034 int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17035 unsigned int pos, unsigned int count,
17036+ void *kbuf, void __user *ubuf) __size_overflow(4);
17037+int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17038+ unsigned int pos, unsigned int count,
17039 void *kbuf, void __user *ubuf)
17040 {
17041 int ret;
17042@@ -269,6 +278,9 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17043
17044 int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
17045 unsigned int pos, unsigned int count,
17046+ const void *kbuf, const void __user *ubuf) __size_overflow(4);
17047+int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
17048+ unsigned int pos, unsigned int count,
17049 const void *kbuf, const void __user *ubuf)
17050 {
17051 int ret;
17052@@ -439,6 +451,9 @@ static void convert_to_fxsr(struct task_struct *tsk,
17053
17054 int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17055 unsigned int pos, unsigned int count,
17056+ void *kbuf, void __user *ubuf) __size_overflow(3,4);
17057+int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17058+ unsigned int pos, unsigned int count,
17059 void *kbuf, void __user *ubuf)
17060 {
17061 struct user_i387_ia32_struct env;
17062@@ -471,6 +486,9 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17063
17064 int fpregs_set(struct task_struct *target, const struct user_regset *regset,
17065 unsigned int pos, unsigned int count,
17066+ const void *kbuf, const void __user *ubuf) __size_overflow(3,4);
17067+int fpregs_set(struct task_struct *target, const struct user_regset *regset,
17068+ unsigned int pos, unsigned int count,
17069 const void *kbuf, const void __user *ubuf)
17070 {
17071 struct user_i387_ia32_struct env;
17072@@ -619,6 +637,8 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
17073 }
17074
17075 static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
17076+ unsigned int size) __size_overflow(2);
17077+static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
17078 unsigned int size)
17079 {
17080 struct task_struct *tsk = current;
17081diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17082index 6104852..6114160 100644
17083--- a/arch/x86/kernel/i8259.c
17084+++ b/arch/x86/kernel/i8259.c
17085@@ -210,7 +210,7 @@ spurious_8259A_irq:
17086 "spurious 8259A interrupt: IRQ%d.\n", irq);
17087 spurious_irq_mask |= irqmask;
17088 }
17089- atomic_inc(&irq_err_count);
17090+ atomic_inc_unchecked(&irq_err_count);
17091 /*
17092 * Theoretically we do not have to handle this IRQ,
17093 * but in Linux this does not cause problems and is
17094diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17095index 43e9ccf..44ccf6f 100644
17096--- a/arch/x86/kernel/init_task.c
17097+++ b/arch/x86/kernel/init_task.c
17098@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17099 * way process stacks are handled. This is done by having a special
17100 * "init_task" linker map entry..
17101 */
17102-union thread_union init_thread_union __init_task_data =
17103- { INIT_THREAD_INFO(init_task) };
17104+union thread_union init_thread_union __init_task_data;
17105
17106 /*
17107 * Initial task structure.
17108@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17109 * section. Since TSS's are completely CPU-local, we want them
17110 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17111 */
17112-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17113-
17114+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17115+EXPORT_SYMBOL(init_tss);
17116diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17117index 8c96897..be66bfa 100644
17118--- a/arch/x86/kernel/ioport.c
17119+++ b/arch/x86/kernel/ioport.c
17120@@ -6,6 +6,7 @@
17121 #include <linux/sched.h>
17122 #include <linux/kernel.h>
17123 #include <linux/capability.h>
17124+#include <linux/security.h>
17125 #include <linux/errno.h>
17126 #include <linux/types.h>
17127 #include <linux/ioport.h>
17128@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17129
17130 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17131 return -EINVAL;
17132+#ifdef CONFIG_GRKERNSEC_IO
17133+ if (turn_on && grsec_disable_privio) {
17134+ gr_handle_ioperm();
17135+ return -EPERM;
17136+ }
17137+#endif
17138 if (turn_on && !capable(CAP_SYS_RAWIO))
17139 return -EPERM;
17140
17141@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17142 * because the ->io_bitmap_max value must match the bitmap
17143 * contents:
17144 */
17145- tss = &per_cpu(init_tss, get_cpu());
17146+ tss = init_tss + get_cpu();
17147
17148 if (turn_on)
17149 bitmap_clear(t->io_bitmap_ptr, from, num);
17150@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17151 return -EINVAL;
17152 /* Trying to gain more privileges? */
17153 if (level > old) {
17154+#ifdef CONFIG_GRKERNSEC_IO
17155+ if (grsec_disable_privio) {
17156+ gr_handle_iopl();
17157+ return -EPERM;
17158+ }
17159+#endif
17160 if (!capable(CAP_SYS_RAWIO))
17161 return -EPERM;
17162 }
17163diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17164index 7943e0c..dd32c5c 100644
17165--- a/arch/x86/kernel/irq.c
17166+++ b/arch/x86/kernel/irq.c
17167@@ -18,7 +18,7 @@
17168 #include <asm/mce.h>
17169 #include <asm/hw_irq.h>
17170
17171-atomic_t irq_err_count;
17172+atomic_unchecked_t irq_err_count;
17173
17174 /* Function pointer for generic interrupt vector handling */
17175 void (*x86_platform_ipi_callback)(void) = NULL;
17176@@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17177 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17178 seq_printf(p, " Machine check polls\n");
17179 #endif
17180- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17181+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17182 #if defined(CONFIG_X86_IO_APIC)
17183- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17184+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17185 #endif
17186 return 0;
17187 }
17188@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17189
17190 u64 arch_irq_stat(void)
17191 {
17192- u64 sum = atomic_read(&irq_err_count);
17193+ u64 sum = atomic_read_unchecked(&irq_err_count);
17194
17195 #ifdef CONFIG_X86_IO_APIC
17196- sum += atomic_read(&irq_mis_count);
17197+ sum += atomic_read_unchecked(&irq_mis_count);
17198 #endif
17199 return sum;
17200 }
17201diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17202index 40fc861..9b8739b 100644
17203--- a/arch/x86/kernel/irq_32.c
17204+++ b/arch/x86/kernel/irq_32.c
17205@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17206 __asm__ __volatile__("andl %%esp,%0" :
17207 "=r" (sp) : "0" (THREAD_SIZE - 1));
17208
17209- return sp < (sizeof(struct thread_info) + STACK_WARN);
17210+ return sp < STACK_WARN;
17211 }
17212
17213 static void print_stack_overflow(void)
17214@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17215 * per-CPU IRQ handling contexts (thread information and stack)
17216 */
17217 union irq_ctx {
17218- struct thread_info tinfo;
17219- u32 stack[THREAD_SIZE/sizeof(u32)];
17220+ unsigned long previous_esp;
17221+ u32 stack[THREAD_SIZE/sizeof(u32)];
17222 } __attribute__((aligned(THREAD_SIZE)));
17223
17224 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17225@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17226 static inline int
17227 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17228 {
17229- union irq_ctx *curctx, *irqctx;
17230+ union irq_ctx *irqctx;
17231 u32 *isp, arg1, arg2;
17232
17233- curctx = (union irq_ctx *) current_thread_info();
17234 irqctx = __this_cpu_read(hardirq_ctx);
17235
17236 /*
17237@@ -92,21 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17238 * handler) we can't do that and just have to keep using the
17239 * current stack (which is the irq stack already after all)
17240 */
17241- if (unlikely(curctx == irqctx))
17242+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17243 return 0;
17244
17245 /* build the stack frame on the IRQ stack */
17246- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17247- irqctx->tinfo.task = curctx->tinfo.task;
17248- irqctx->tinfo.previous_esp = current_stack_pointer;
17249+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17250+ irqctx->previous_esp = current_stack_pointer;
17251
17252- /*
17253- * Copy the softirq bits in preempt_count so that the
17254- * softirq checks work in the hardirq context.
17255- */
17256- irqctx->tinfo.preempt_count =
17257- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17258- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17259+#ifdef CONFIG_PAX_MEMORY_UDEREF
17260+ __set_fs(MAKE_MM_SEG(0));
17261+#endif
17262
17263 if (unlikely(overflow))
17264 call_on_stack(print_stack_overflow, isp);
17265@@ -118,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17266 : "0" (irq), "1" (desc), "2" (isp),
17267 "D" (desc->handle_irq)
17268 : "memory", "cc", "ecx");
17269+
17270+#ifdef CONFIG_PAX_MEMORY_UDEREF
17271+ __set_fs(current_thread_info()->addr_limit);
17272+#endif
17273+
17274 return 1;
17275 }
17276
17277@@ -126,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17278 */
17279 void __cpuinit irq_ctx_init(int cpu)
17280 {
17281- union irq_ctx *irqctx;
17282-
17283 if (per_cpu(hardirq_ctx, cpu))
17284 return;
17285
17286- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17287- THREAD_FLAGS,
17288- THREAD_ORDER));
17289- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17290- irqctx->tinfo.cpu = cpu;
17291- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17292- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17293-
17294- per_cpu(hardirq_ctx, cpu) = irqctx;
17295-
17296- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17297- THREAD_FLAGS,
17298- THREAD_ORDER));
17299- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17300- irqctx->tinfo.cpu = cpu;
17301- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17302-
17303- per_cpu(softirq_ctx, cpu) = irqctx;
17304+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17305+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17306
17307 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17308 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17309@@ -157,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17310 asmlinkage void do_softirq(void)
17311 {
17312 unsigned long flags;
17313- struct thread_info *curctx;
17314 union irq_ctx *irqctx;
17315 u32 *isp;
17316
17317@@ -167,15 +147,22 @@ asmlinkage void do_softirq(void)
17318 local_irq_save(flags);
17319
17320 if (local_softirq_pending()) {
17321- curctx = current_thread_info();
17322 irqctx = __this_cpu_read(softirq_ctx);
17323- irqctx->tinfo.task = curctx->task;
17324- irqctx->tinfo.previous_esp = current_stack_pointer;
17325+ irqctx->previous_esp = current_stack_pointer;
17326
17327 /* build the stack frame on the softirq stack */
17328- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17329+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17330+
17331+#ifdef CONFIG_PAX_MEMORY_UDEREF
17332+ __set_fs(MAKE_MM_SEG(0));
17333+#endif
17334
17335 call_on_stack(__do_softirq, isp);
17336+
17337+#ifdef CONFIG_PAX_MEMORY_UDEREF
17338+ __set_fs(current_thread_info()->addr_limit);
17339+#endif
17340+
17341 /*
17342 * Shouldn't happen, we returned above if in_interrupt():
17343 */
17344diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17345index d04d3ec..ea4b374 100644
17346--- a/arch/x86/kernel/irq_64.c
17347+++ b/arch/x86/kernel/irq_64.c
17348@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17349 u64 estack_top, estack_bottom;
17350 u64 curbase = (u64)task_stack_page(current);
17351
17352- if (user_mode_vm(regs))
17353+ if (user_mode(regs))
17354 return;
17355
17356 if (regs->sp >= curbase + sizeof(struct thread_info) +
17357diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17358index faba577..93b9e71 100644
17359--- a/arch/x86/kernel/kgdb.c
17360+++ b/arch/x86/kernel/kgdb.c
17361@@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17362 #ifdef CONFIG_X86_32
17363 switch (regno) {
17364 case GDB_SS:
17365- if (!user_mode_vm(regs))
17366+ if (!user_mode(regs))
17367 *(unsigned long *)mem = __KERNEL_DS;
17368 break;
17369 case GDB_SP:
17370- if (!user_mode_vm(regs))
17371+ if (!user_mode(regs))
17372 *(unsigned long *)mem = kernel_stack_pointer(regs);
17373 break;
17374 case GDB_GS:
17375@@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17376 case 'k':
17377 /* clear the trace bit */
17378 linux_regs->flags &= ~X86_EFLAGS_TF;
17379- atomic_set(&kgdb_cpu_doing_single_step, -1);
17380+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17381
17382 /* set the trace bit if we're stepping */
17383 if (remcomInBuffer[0] == 's') {
17384 linux_regs->flags |= X86_EFLAGS_TF;
17385- atomic_set(&kgdb_cpu_doing_single_step,
17386+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17387 raw_smp_processor_id());
17388 }
17389
17390@@ -543,7 +543,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17391
17392 switch (cmd) {
17393 case DIE_DEBUG:
17394- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17395+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17396 if (user_mode(regs))
17397 return single_step_cont(regs, args);
17398 break;
17399diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17400index 7da647d..56fe348 100644
17401--- a/arch/x86/kernel/kprobes.c
17402+++ b/arch/x86/kernel/kprobes.c
17403@@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17404 } __attribute__((packed)) *insn;
17405
17406 insn = (struct __arch_relative_insn *)from;
17407+
17408+ pax_open_kernel();
17409 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17410 insn->op = op;
17411+ pax_close_kernel();
17412 }
17413
17414 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17415@@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17416 kprobe_opcode_t opcode;
17417 kprobe_opcode_t *orig_opcodes = opcodes;
17418
17419- if (search_exception_tables((unsigned long)opcodes))
17420+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17421 return 0; /* Page fault may occur on this address. */
17422
17423 retry:
17424@@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17425 }
17426 }
17427 insn_get_length(&insn);
17428+ pax_open_kernel();
17429 memcpy(dest, insn.kaddr, insn.length);
17430+ pax_close_kernel();
17431
17432 #ifdef CONFIG_X86_64
17433 if (insn_rip_relative(&insn)) {
17434@@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17435 (u8 *) dest;
17436 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17437 disp = (u8 *) dest + insn_offset_displacement(&insn);
17438+ pax_open_kernel();
17439 *(s32 *) disp = (s32) newdisp;
17440+ pax_close_kernel();
17441 }
17442 #endif
17443 return insn.length;
17444@@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
17445 */
17446 __copy_instruction(p->ainsn.insn, p->addr, 0);
17447
17448- if (can_boost(p->addr))
17449+ if (can_boost(ktla_ktva(p->addr)))
17450 p->ainsn.boostable = 0;
17451 else
17452 p->ainsn.boostable = -1;
17453
17454- p->opcode = *p->addr;
17455+ p->opcode = *(ktla_ktva(p->addr));
17456 }
17457
17458 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17459@@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17460 * nor set current_kprobe, because it doesn't use single
17461 * stepping.
17462 */
17463- regs->ip = (unsigned long)p->ainsn.insn;
17464+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17465 preempt_enable_no_resched();
17466 return;
17467 }
17468@@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17469 if (p->opcode == BREAKPOINT_INSTRUCTION)
17470 regs->ip = (unsigned long)p->addr;
17471 else
17472- regs->ip = (unsigned long)p->ainsn.insn;
17473+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17474 }
17475
17476 /*
17477@@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17478 setup_singlestep(p, regs, kcb, 0);
17479 return 1;
17480 }
17481- } else if (*addr != BREAKPOINT_INSTRUCTION) {
17482+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17483 /*
17484 * The breakpoint instruction was removed right
17485 * after we hit it. Another cpu has removed
17486@@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17487 " movq %rax, 152(%rsp)\n"
17488 RESTORE_REGS_STRING
17489 " popfq\n"
17490+#ifdef KERNEXEC_PLUGIN
17491+ " btsq $63,(%rsp)\n"
17492+#endif
17493 #else
17494 " pushf\n"
17495 SAVE_REGS_STRING
17496@@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17497 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17498 {
17499 unsigned long *tos = stack_addr(regs);
17500- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17501+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17502 unsigned long orig_ip = (unsigned long)p->addr;
17503 kprobe_opcode_t *insn = p->ainsn.insn;
17504
17505@@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
17506 struct die_args *args = data;
17507 int ret = NOTIFY_DONE;
17508
17509- if (args->regs && user_mode_vm(args->regs))
17510+ if (args->regs && user_mode(args->regs))
17511 return ret;
17512
17513 switch (val) {
17514@@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17515 * Verify if the address gap is in 2GB range, because this uses
17516 * a relative jump.
17517 */
17518- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17519+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17520 if (abs(rel) > 0x7fffffff)
17521 return -ERANGE;
17522
17523@@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17524 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17525
17526 /* Set probe function call */
17527- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17528+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17529
17530 /* Set returning jmp instruction at the tail of out-of-line buffer */
17531 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17532- (u8 *)op->kp.addr + op->optinsn.size);
17533+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17534
17535 flush_icache_range((unsigned long) buf,
17536 (unsigned long) buf + TMPL_END_IDX +
17537@@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17538 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17539
17540 /* Backup instructions which will be replaced by jump address */
17541- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17542+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17543 RELATIVE_ADDR_SIZE);
17544
17545 insn_buf[0] = RELATIVEJUMP_OPCODE;
17546diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17547index ea69726..a305f16 100644
17548--- a/arch/x86/kernel/ldt.c
17549+++ b/arch/x86/kernel/ldt.c
17550@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17551 if (reload) {
17552 #ifdef CONFIG_SMP
17553 preempt_disable();
17554- load_LDT(pc);
17555+ load_LDT_nolock(pc);
17556 if (!cpumask_equal(mm_cpumask(current->mm),
17557 cpumask_of(smp_processor_id())))
17558 smp_call_function(flush_ldt, current->mm, 1);
17559 preempt_enable();
17560 #else
17561- load_LDT(pc);
17562+ load_LDT_nolock(pc);
17563 #endif
17564 }
17565 if (oldsize) {
17566@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
17567 return err;
17568
17569 for (i = 0; i < old->size; i++)
17570- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
17571+ write_ldt_entry(new->ldt, i, old->ldt + i);
17572 return 0;
17573 }
17574
17575@@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
17576 retval = copy_ldt(&mm->context, &old_mm->context);
17577 mutex_unlock(&old_mm->context.lock);
17578 }
17579+
17580+ if (tsk == current) {
17581+ mm->context.vdso = 0;
17582+
17583+#ifdef CONFIG_X86_32
17584+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17585+ mm->context.user_cs_base = 0UL;
17586+ mm->context.user_cs_limit = ~0UL;
17587+
17588+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17589+ cpus_clear(mm->context.cpu_user_cs_mask);
17590+#endif
17591+
17592+#endif
17593+#endif
17594+
17595+ }
17596+
17597 return retval;
17598 }
17599
17600@@ -141,6 +159,7 @@ void destroy_context(struct mm_struct *mm)
17601 }
17602 }
17603
17604+static int read_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2);
17605 static int read_ldt(void __user *ptr, unsigned long bytecount)
17606 {
17607 int err;
17608@@ -175,6 +194,7 @@ error_return:
17609 return err;
17610 }
17611
17612+static int read_default_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2);
17613 static int read_default_ldt(void __user *ptr, unsigned long bytecount)
17614 {
17615 /* CHECKME: Can we use _one_ random number ? */
17616@@ -230,6 +250,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
17617 }
17618 }
17619
17620+#ifdef CONFIG_PAX_SEGMEXEC
17621+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
17622+ error = -EINVAL;
17623+ goto out_unlock;
17624+ }
17625+#endif
17626+
17627 fill_ldt(&ldt, &ldt_info);
17628 if (oldmode)
17629 ldt.avl = 0;
17630diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
17631index a3fa43b..8966f4c 100644
17632--- a/arch/x86/kernel/machine_kexec_32.c
17633+++ b/arch/x86/kernel/machine_kexec_32.c
17634@@ -27,7 +27,7 @@
17635 #include <asm/cacheflush.h>
17636 #include <asm/debugreg.h>
17637
17638-static void set_idt(void *newidt, __u16 limit)
17639+static void set_idt(struct desc_struct *newidt, __u16 limit)
17640 {
17641 struct desc_ptr curidt;
17642
17643@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
17644 }
17645
17646
17647-static void set_gdt(void *newgdt, __u16 limit)
17648+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
17649 {
17650 struct desc_ptr curgdt;
17651
17652@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
17653 }
17654
17655 control_page = page_address(image->control_code_page);
17656- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
17657+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
17658
17659 relocate_kernel_ptr = control_page;
17660 page_list[PA_CONTROL_PAGE] = __pa(control_page);
17661diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
17662index 3ca42d0..79d24cd 100644
17663--- a/arch/x86/kernel/microcode_intel.c
17664+++ b/arch/x86/kernel/microcode_intel.c
17665@@ -434,15 +434,16 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
17666 return ret;
17667 }
17668
17669+static int get_ucode_user(void *to, const void *from, size_t n) __size_overflow(3);
17670 static int get_ucode_user(void *to, const void *from, size_t n)
17671 {
17672- return copy_from_user(to, from, n);
17673+ return copy_from_user(to, (const void __force_user *)from, n);
17674 }
17675
17676 static enum ucode_state
17677 request_microcode_user(int cpu, const void __user *buf, size_t size)
17678 {
17679- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
17680+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
17681 }
17682
17683 static void microcode_fini_cpu(int cpu)
17684diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
17685index 925179f..1f0d561 100644
17686--- a/arch/x86/kernel/module.c
17687+++ b/arch/x86/kernel/module.c
17688@@ -36,15 +36,61 @@
17689 #define DEBUGP(fmt...)
17690 #endif
17691
17692-void *module_alloc(unsigned long size)
17693+static inline void *__module_alloc(unsigned long size, pgprot_t prot) __size_overflow(1);
17694+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
17695 {
17696- if (PAGE_ALIGN(size) > MODULES_LEN)
17697+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
17698 return NULL;
17699 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
17700- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
17701+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
17702 -1, __builtin_return_address(0));
17703 }
17704
17705+void *module_alloc(unsigned long size)
17706+{
17707+
17708+#ifdef CONFIG_PAX_KERNEXEC
17709+ return __module_alloc(size, PAGE_KERNEL);
17710+#else
17711+ return __module_alloc(size, PAGE_KERNEL_EXEC);
17712+#endif
17713+
17714+}
17715+
17716+#ifdef CONFIG_PAX_KERNEXEC
17717+#ifdef CONFIG_X86_32
17718+void *module_alloc_exec(unsigned long size)
17719+{
17720+ struct vm_struct *area;
17721+
17722+ if (size == 0)
17723+ return NULL;
17724+
17725+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
17726+ return area ? area->addr : NULL;
17727+}
17728+EXPORT_SYMBOL(module_alloc_exec);
17729+
17730+void module_free_exec(struct module *mod, void *module_region)
17731+{
17732+ vunmap(module_region);
17733+}
17734+EXPORT_SYMBOL(module_free_exec);
17735+#else
17736+void module_free_exec(struct module *mod, void *module_region)
17737+{
17738+ module_free(mod, module_region);
17739+}
17740+EXPORT_SYMBOL(module_free_exec);
17741+
17742+void *module_alloc_exec(unsigned long size)
17743+{
17744+ return __module_alloc(size, PAGE_KERNEL_RX);
17745+}
17746+EXPORT_SYMBOL(module_alloc_exec);
17747+#endif
17748+#endif
17749+
17750 #ifdef CONFIG_X86_32
17751 int apply_relocate(Elf32_Shdr *sechdrs,
17752 const char *strtab,
17753@@ -55,14 +101,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
17754 unsigned int i;
17755 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
17756 Elf32_Sym *sym;
17757- uint32_t *location;
17758+ uint32_t *plocation, location;
17759
17760 DEBUGP("Applying relocate section %u to %u\n", relsec,
17761 sechdrs[relsec].sh_info);
17762 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
17763 /* This is where to make the change */
17764- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
17765- + rel[i].r_offset;
17766+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
17767+ location = (uint32_t)plocation;
17768+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
17769+ plocation = ktla_ktva((void *)plocation);
17770 /* This is the symbol it is referring to. Note that all
17771 undefined symbols have been resolved. */
17772 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
17773@@ -71,11 +119,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
17774 switch (ELF32_R_TYPE(rel[i].r_info)) {
17775 case R_386_32:
17776 /* We add the value into the location given */
17777- *location += sym->st_value;
17778+ pax_open_kernel();
17779+ *plocation += sym->st_value;
17780+ pax_close_kernel();
17781 break;
17782 case R_386_PC32:
17783 /* Add the value, subtract its postition */
17784- *location += sym->st_value - (uint32_t)location;
17785+ pax_open_kernel();
17786+ *plocation += sym->st_value - location;
17787+ pax_close_kernel();
17788 break;
17789 default:
17790 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
17791@@ -120,21 +172,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
17792 case R_X86_64_NONE:
17793 break;
17794 case R_X86_64_64:
17795+ pax_open_kernel();
17796 *(u64 *)loc = val;
17797+ pax_close_kernel();
17798 break;
17799 case R_X86_64_32:
17800+ pax_open_kernel();
17801 *(u32 *)loc = val;
17802+ pax_close_kernel();
17803 if (val != *(u32 *)loc)
17804 goto overflow;
17805 break;
17806 case R_X86_64_32S:
17807+ pax_open_kernel();
17808 *(s32 *)loc = val;
17809+ pax_close_kernel();
17810 if ((s64)val != *(s32 *)loc)
17811 goto overflow;
17812 break;
17813 case R_X86_64_PC32:
17814 val -= (u64)loc;
17815+ pax_open_kernel();
17816 *(u32 *)loc = val;
17817+ pax_close_kernel();
17818+
17819 #if 0
17820 if ((s64)val != *(s32 *)loc)
17821 goto overflow;
17822diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
17823index 47acaf3..ec48ab6 100644
17824--- a/arch/x86/kernel/nmi.c
17825+++ b/arch/x86/kernel/nmi.c
17826@@ -505,6 +505,17 @@ static inline void nmi_nesting_postprocess(void)
17827 dotraplinkage notrace __kprobes void
17828 do_nmi(struct pt_regs *regs, long error_code)
17829 {
17830+
17831+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17832+ if (!user_mode(regs)) {
17833+ unsigned long cs = regs->cs & 0xFFFF;
17834+ unsigned long ip = ktva_ktla(regs->ip);
17835+
17836+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17837+ regs->ip = ip;
17838+ }
17839+#endif
17840+
17841 nmi_nesting_preprocess(regs);
17842
17843 nmi_enter();
17844diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
17845index 676b8c7..870ba04 100644
17846--- a/arch/x86/kernel/paravirt-spinlocks.c
17847+++ b/arch/x86/kernel/paravirt-spinlocks.c
17848@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
17849 arch_spin_lock(lock);
17850 }
17851
17852-struct pv_lock_ops pv_lock_ops = {
17853+struct pv_lock_ops pv_lock_ops __read_only = {
17854 #ifdef CONFIG_SMP
17855 .spin_is_locked = __ticket_spin_is_locked,
17856 .spin_is_contended = __ticket_spin_is_contended,
17857diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
17858index d90272e..6bb013b 100644
17859--- a/arch/x86/kernel/paravirt.c
17860+++ b/arch/x86/kernel/paravirt.c
17861@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
17862 {
17863 return x;
17864 }
17865+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
17866+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
17867+#endif
17868
17869 void __init default_banner(void)
17870 {
17871@@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
17872 if (opfunc == NULL)
17873 /* If there's no function, patch it with a ud2a (BUG) */
17874 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
17875- else if (opfunc == _paravirt_nop)
17876+ else if (opfunc == (void *)_paravirt_nop)
17877 /* If the operation is a nop, then nop the callsite */
17878 ret = paravirt_patch_nop();
17879
17880 /* identity functions just return their single argument */
17881- else if (opfunc == _paravirt_ident_32)
17882+ else if (opfunc == (void *)_paravirt_ident_32)
17883 ret = paravirt_patch_ident_32(insnbuf, len);
17884- else if (opfunc == _paravirt_ident_64)
17885+ else if (opfunc == (void *)_paravirt_ident_64)
17886 ret = paravirt_patch_ident_64(insnbuf, len);
17887+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
17888+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
17889+ ret = paravirt_patch_ident_64(insnbuf, len);
17890+#endif
17891
17892 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
17893 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
17894@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
17895 if (insn_len > len || start == NULL)
17896 insn_len = len;
17897 else
17898- memcpy(insnbuf, start, insn_len);
17899+ memcpy(insnbuf, ktla_ktva(start), insn_len);
17900
17901 return insn_len;
17902 }
17903@@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
17904 preempt_enable();
17905 }
17906
17907-struct pv_info pv_info = {
17908+struct pv_info pv_info __read_only = {
17909 .name = "bare hardware",
17910 .paravirt_enabled = 0,
17911 .kernel_rpl = 0,
17912@@ -313,16 +320,16 @@ struct pv_info pv_info = {
17913 #endif
17914 };
17915
17916-struct pv_init_ops pv_init_ops = {
17917+struct pv_init_ops pv_init_ops __read_only = {
17918 .patch = native_patch,
17919 };
17920
17921-struct pv_time_ops pv_time_ops = {
17922+struct pv_time_ops pv_time_ops __read_only = {
17923 .sched_clock = native_sched_clock,
17924 .steal_clock = native_steal_clock,
17925 };
17926
17927-struct pv_irq_ops pv_irq_ops = {
17928+struct pv_irq_ops pv_irq_ops __read_only = {
17929 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
17930 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
17931 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
17932@@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
17933 #endif
17934 };
17935
17936-struct pv_cpu_ops pv_cpu_ops = {
17937+struct pv_cpu_ops pv_cpu_ops __read_only = {
17938 .cpuid = native_cpuid,
17939 .get_debugreg = native_get_debugreg,
17940 .set_debugreg = native_set_debugreg,
17941@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
17942 .end_context_switch = paravirt_nop,
17943 };
17944
17945-struct pv_apic_ops pv_apic_ops = {
17946+struct pv_apic_ops pv_apic_ops __read_only = {
17947 #ifdef CONFIG_X86_LOCAL_APIC
17948 .startup_ipi_hook = paravirt_nop,
17949 #endif
17950 };
17951
17952-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
17953+#ifdef CONFIG_X86_32
17954+#ifdef CONFIG_X86_PAE
17955+/* 64-bit pagetable entries */
17956+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
17957+#else
17958 /* 32-bit pagetable entries */
17959 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
17960+#endif
17961 #else
17962 /* 64-bit pagetable entries */
17963 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
17964 #endif
17965
17966-struct pv_mmu_ops pv_mmu_ops = {
17967+struct pv_mmu_ops pv_mmu_ops __read_only = {
17968
17969 .read_cr2 = native_read_cr2,
17970 .write_cr2 = native_write_cr2,
17971@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
17972 .make_pud = PTE_IDENT,
17973
17974 .set_pgd = native_set_pgd,
17975+ .set_pgd_batched = native_set_pgd_batched,
17976 #endif
17977 #endif /* PAGETABLE_LEVELS >= 3 */
17978
17979@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
17980 },
17981
17982 .set_fixmap = native_set_fixmap,
17983+
17984+#ifdef CONFIG_PAX_KERNEXEC
17985+ .pax_open_kernel = native_pax_open_kernel,
17986+ .pax_close_kernel = native_pax_close_kernel,
17987+#endif
17988+
17989 };
17990
17991 EXPORT_SYMBOL_GPL(pv_time_ops);
17992diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
17993index 35ccf75..7a15747 100644
17994--- a/arch/x86/kernel/pci-iommu_table.c
17995+++ b/arch/x86/kernel/pci-iommu_table.c
17996@@ -2,7 +2,7 @@
17997 #include <asm/iommu_table.h>
17998 #include <linux/string.h>
17999 #include <linux/kallsyms.h>
18000-
18001+#include <linux/sched.h>
18002
18003 #define DEBUG 1
18004
18005diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18006index 15763af..da59ada 100644
18007--- a/arch/x86/kernel/process.c
18008+++ b/arch/x86/kernel/process.c
18009@@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
18010
18011 void free_thread_info(struct thread_info *ti)
18012 {
18013- free_thread_xstate(ti->task);
18014 free_pages((unsigned long)ti, THREAD_ORDER);
18015 }
18016
18017+static struct kmem_cache *task_struct_cachep;
18018+
18019 void arch_task_cache_init(void)
18020 {
18021- task_xstate_cachep =
18022- kmem_cache_create("task_xstate", xstate_size,
18023+ /* create a slab on which task_structs can be allocated */
18024+ task_struct_cachep =
18025+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18026+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18027+
18028+ task_xstate_cachep =
18029+ kmem_cache_create("task_xstate", xstate_size,
18030 __alignof__(union thread_xstate),
18031- SLAB_PANIC | SLAB_NOTRACK, NULL);
18032+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18033+}
18034+
18035+struct task_struct *alloc_task_struct_node(int node)
18036+{
18037+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18038+}
18039+
18040+void free_task_struct(struct task_struct *task)
18041+{
18042+ free_thread_xstate(task);
18043+ kmem_cache_free(task_struct_cachep, task);
18044 }
18045
18046 /*
18047@@ -70,7 +87,7 @@ void exit_thread(void)
18048 unsigned long *bp = t->io_bitmap_ptr;
18049
18050 if (bp) {
18051- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18052+ struct tss_struct *tss = init_tss + get_cpu();
18053
18054 t->io_bitmap_ptr = NULL;
18055 clear_thread_flag(TIF_IO_BITMAP);
18056@@ -106,7 +123,7 @@ void show_regs_common(void)
18057
18058 printk(KERN_CONT "\n");
18059 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18060- current->pid, current->comm, print_tainted(),
18061+ task_pid_nr(current), current->comm, print_tainted(),
18062 init_utsname()->release,
18063 (int)strcspn(init_utsname()->version, " "),
18064 init_utsname()->version);
18065@@ -120,6 +137,9 @@ void flush_thread(void)
18066 {
18067 struct task_struct *tsk = current;
18068
18069+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18070+ loadsegment(gs, 0);
18071+#endif
18072 flush_ptrace_hw_breakpoint(tsk);
18073 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18074 /*
18075@@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18076 regs.di = (unsigned long) arg;
18077
18078 #ifdef CONFIG_X86_32
18079- regs.ds = __USER_DS;
18080- regs.es = __USER_DS;
18081+ regs.ds = __KERNEL_DS;
18082+ regs.es = __KERNEL_DS;
18083 regs.fs = __KERNEL_PERCPU;
18084- regs.gs = __KERNEL_STACK_CANARY;
18085+ savesegment(gs, regs.gs);
18086 #else
18087 regs.ss = __KERNEL_DS;
18088 #endif
18089@@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
18090
18091 return ret;
18092 }
18093-void stop_this_cpu(void *dummy)
18094+__noreturn void stop_this_cpu(void *dummy)
18095 {
18096 local_irq_disable();
18097 /*
18098@@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
18099 }
18100 early_param("idle", idle_setup);
18101
18102-unsigned long arch_align_stack(unsigned long sp)
18103+#ifdef CONFIG_PAX_RANDKSTACK
18104+void pax_randomize_kstack(struct pt_regs *regs)
18105 {
18106- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18107- sp -= get_random_int() % 8192;
18108- return sp & ~0xf;
18109-}
18110+ struct thread_struct *thread = &current->thread;
18111+ unsigned long time;
18112
18113-unsigned long arch_randomize_brk(struct mm_struct *mm)
18114-{
18115- unsigned long range_end = mm->brk + 0x02000000;
18116- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18117-}
18118+ if (!randomize_va_space)
18119+ return;
18120+
18121+ if (v8086_mode(regs))
18122+ return;
18123
18124+ rdtscl(time);
18125+
18126+ /* P4 seems to return a 0 LSB, ignore it */
18127+#ifdef CONFIG_MPENTIUM4
18128+ time &= 0x3EUL;
18129+ time <<= 2;
18130+#elif defined(CONFIG_X86_64)
18131+ time &= 0xFUL;
18132+ time <<= 4;
18133+#else
18134+ time &= 0x1FUL;
18135+ time <<= 3;
18136+#endif
18137+
18138+ thread->sp0 ^= time;
18139+ load_sp0(init_tss + smp_processor_id(), thread);
18140+
18141+#ifdef CONFIG_X86_64
18142+ percpu_write(kernel_stack, thread->sp0);
18143+#endif
18144+}
18145+#endif
18146diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18147index c08d1ff..6ae1c81 100644
18148--- a/arch/x86/kernel/process_32.c
18149+++ b/arch/x86/kernel/process_32.c
18150@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18151 unsigned long thread_saved_pc(struct task_struct *tsk)
18152 {
18153 return ((unsigned long *)tsk->thread.sp)[3];
18154+//XXX return tsk->thread.eip;
18155 }
18156
18157 #ifndef CONFIG_SMP
18158@@ -132,15 +133,14 @@ void __show_regs(struct pt_regs *regs, int all)
18159 unsigned long sp;
18160 unsigned short ss, gs;
18161
18162- if (user_mode_vm(regs)) {
18163+ if (user_mode(regs)) {
18164 sp = regs->sp;
18165 ss = regs->ss & 0xffff;
18166- gs = get_user_gs(regs);
18167 } else {
18168 sp = kernel_stack_pointer(regs);
18169 savesegment(ss, ss);
18170- savesegment(gs, gs);
18171 }
18172+ gs = get_user_gs(regs);
18173
18174 show_regs_common();
18175
18176@@ -202,13 +202,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18177 struct task_struct *tsk;
18178 int err;
18179
18180- childregs = task_pt_regs(p);
18181+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18182 *childregs = *regs;
18183 childregs->ax = 0;
18184 childregs->sp = sp;
18185
18186 p->thread.sp = (unsigned long) childregs;
18187 p->thread.sp0 = (unsigned long) (childregs+1);
18188+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18189
18190 p->thread.ip = (unsigned long) ret_from_fork;
18191
18192@@ -299,7 +300,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18193 struct thread_struct *prev = &prev_p->thread,
18194 *next = &next_p->thread;
18195 int cpu = smp_processor_id();
18196- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18197+ struct tss_struct *tss = init_tss + cpu;
18198 fpu_switch_t fpu;
18199
18200 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18201@@ -323,6 +324,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18202 */
18203 lazy_save_gs(prev->gs);
18204
18205+#ifdef CONFIG_PAX_MEMORY_UDEREF
18206+ __set_fs(task_thread_info(next_p)->addr_limit);
18207+#endif
18208+
18209 /*
18210 * Load the per-thread Thread-Local Storage descriptor.
18211 */
18212@@ -353,6 +358,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18213 */
18214 arch_end_context_switch(next_p);
18215
18216+ percpu_write(current_task, next_p);
18217+ percpu_write(current_tinfo, &next_p->tinfo);
18218+
18219 /*
18220 * Restore %gs if needed (which is common)
18221 */
18222@@ -361,8 +369,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18223
18224 switch_fpu_finish(next_p, fpu);
18225
18226- percpu_write(current_task, next_p);
18227-
18228 return prev_p;
18229 }
18230
18231@@ -392,4 +398,3 @@ unsigned long get_wchan(struct task_struct *p)
18232 } while (count++ < 16);
18233 return 0;
18234 }
18235-
18236diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18237index cfa5c90..4facd28 100644
18238--- a/arch/x86/kernel/process_64.c
18239+++ b/arch/x86/kernel/process_64.c
18240@@ -89,7 +89,7 @@ static void __exit_idle(void)
18241 void exit_idle(void)
18242 {
18243 /* idle loop has pid 0 */
18244- if (current->pid)
18245+ if (task_pid_nr(current))
18246 return;
18247 __exit_idle();
18248 }
18249@@ -270,8 +270,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18250 struct pt_regs *childregs;
18251 struct task_struct *me = current;
18252
18253- childregs = ((struct pt_regs *)
18254- (THREAD_SIZE + task_stack_page(p))) - 1;
18255+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18256 *childregs = *regs;
18257
18258 childregs->ax = 0;
18259@@ -283,6 +282,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18260 p->thread.sp = (unsigned long) childregs;
18261 p->thread.sp0 = (unsigned long) (childregs+1);
18262 p->thread.usersp = me->thread.usersp;
18263+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18264
18265 set_tsk_thread_flag(p, TIF_FORK);
18266
18267@@ -385,7 +385,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18268 struct thread_struct *prev = &prev_p->thread;
18269 struct thread_struct *next = &next_p->thread;
18270 int cpu = smp_processor_id();
18271- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18272+ struct tss_struct *tss = init_tss + cpu;
18273 unsigned fsindex, gsindex;
18274 fpu_switch_t fpu;
18275
18276@@ -467,10 +467,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18277 prev->usersp = percpu_read(old_rsp);
18278 percpu_write(old_rsp, next->usersp);
18279 percpu_write(current_task, next_p);
18280+ percpu_write(current_tinfo, &next_p->tinfo);
18281
18282- percpu_write(kernel_stack,
18283- (unsigned long)task_stack_page(next_p) +
18284- THREAD_SIZE - KERNEL_STACK_OFFSET);
18285+ percpu_write(kernel_stack, next->sp0);
18286
18287 /*
18288 * Now maybe reload the debug registers and handle I/O bitmaps
18289@@ -525,12 +524,11 @@ unsigned long get_wchan(struct task_struct *p)
18290 if (!p || p == current || p->state == TASK_RUNNING)
18291 return 0;
18292 stack = (unsigned long)task_stack_page(p);
18293- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18294+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18295 return 0;
18296 fp = *(u64 *)(p->thread.sp);
18297 do {
18298- if (fp < (unsigned long)stack ||
18299- fp >= (unsigned long)stack+THREAD_SIZE)
18300+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18301 return 0;
18302 ip = *(u64 *)(fp+8);
18303 if (!in_sched_functions(ip))
18304diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18305index 5026738..e1b5aa8 100644
18306--- a/arch/x86/kernel/ptrace.c
18307+++ b/arch/x86/kernel/ptrace.c
18308@@ -792,6 +792,10 @@ static int ioperm_active(struct task_struct *target,
18309 static int ioperm_get(struct task_struct *target,
18310 const struct user_regset *regset,
18311 unsigned int pos, unsigned int count,
18312+ void *kbuf, void __user *ubuf) __size_overflow(3,4);
18313+static int ioperm_get(struct task_struct *target,
18314+ const struct user_regset *regset,
18315+ unsigned int pos, unsigned int count,
18316 void *kbuf, void __user *ubuf)
18317 {
18318 if (!target->thread.io_bitmap_ptr)
18319@@ -823,7 +827,7 @@ long arch_ptrace(struct task_struct *child, long request,
18320 unsigned long addr, unsigned long data)
18321 {
18322 int ret;
18323- unsigned long __user *datap = (unsigned long __user *)data;
18324+ unsigned long __user *datap = (__force unsigned long __user *)data;
18325
18326 switch (request) {
18327 /* read the word at location addr in the USER area. */
18328@@ -908,14 +912,14 @@ long arch_ptrace(struct task_struct *child, long request,
18329 if ((int) addr < 0)
18330 return -EIO;
18331 ret = do_get_thread_area(child, addr,
18332- (struct user_desc __user *)data);
18333+ (__force struct user_desc __user *) data);
18334 break;
18335
18336 case PTRACE_SET_THREAD_AREA:
18337 if ((int) addr < 0)
18338 return -EIO;
18339 ret = do_set_thread_area(child, addr,
18340- (struct user_desc __user *)data, 0);
18341+ (__force struct user_desc __user *) data, 0);
18342 break;
18343 #endif
18344
18345@@ -1332,7 +1336,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18346 memset(info, 0, sizeof(*info));
18347 info->si_signo = SIGTRAP;
18348 info->si_code = si_code;
18349- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18350+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18351 }
18352
18353 void user_single_step_siginfo(struct task_struct *tsk,
18354diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18355index 42eb330..139955c 100644
18356--- a/arch/x86/kernel/pvclock.c
18357+++ b/arch/x86/kernel/pvclock.c
18358@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18359 return pv_tsc_khz;
18360 }
18361
18362-static atomic64_t last_value = ATOMIC64_INIT(0);
18363+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18364
18365 void pvclock_resume(void)
18366 {
18367- atomic64_set(&last_value, 0);
18368+ atomic64_set_unchecked(&last_value, 0);
18369 }
18370
18371 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18372@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18373 * updating at the same time, and one of them could be slightly behind,
18374 * making the assumption that last_value always go forward fail to hold.
18375 */
18376- last = atomic64_read(&last_value);
18377+ last = atomic64_read_unchecked(&last_value);
18378 do {
18379 if (ret < last)
18380 return last;
18381- last = atomic64_cmpxchg(&last_value, last, ret);
18382+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18383 } while (unlikely(last != ret));
18384
18385 return ret;
18386diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18387index d840e69..98e9581 100644
18388--- a/arch/x86/kernel/reboot.c
18389+++ b/arch/x86/kernel/reboot.c
18390@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18391 EXPORT_SYMBOL(pm_power_off);
18392
18393 static const struct desc_ptr no_idt = {};
18394-static int reboot_mode;
18395+static unsigned short reboot_mode;
18396 enum reboot_type reboot_type = BOOT_ACPI;
18397 int reboot_force;
18398
18399@@ -335,13 +335,17 @@ core_initcall(reboot_init);
18400 extern const unsigned char machine_real_restart_asm[];
18401 extern const u64 machine_real_restart_gdt[3];
18402
18403-void machine_real_restart(unsigned int type)
18404+__noreturn void machine_real_restart(unsigned int type)
18405 {
18406 void *restart_va;
18407 unsigned long restart_pa;
18408- void (*restart_lowmem)(unsigned int);
18409+ void (* __noreturn restart_lowmem)(unsigned int);
18410 u64 *lowmem_gdt;
18411
18412+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18413+ struct desc_struct *gdt;
18414+#endif
18415+
18416 local_irq_disable();
18417
18418 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18419@@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18420 boot)". This seems like a fairly standard thing that gets set by
18421 REBOOT.COM programs, and the previous reset routine did this
18422 too. */
18423- *((unsigned short *)0x472) = reboot_mode;
18424+ *(unsigned short *)(__va(0x472)) = reboot_mode;
18425
18426 /* Patch the GDT in the low memory trampoline */
18427 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18428
18429 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18430 restart_pa = virt_to_phys(restart_va);
18431- restart_lowmem = (void (*)(unsigned int))restart_pa;
18432+ restart_lowmem = (void *)restart_pa;
18433
18434 /* GDT[0]: GDT self-pointer */
18435 lowmem_gdt[0] =
18436@@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18437 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18438
18439 /* Jump to the identity-mapped low memory code */
18440+
18441+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18442+ gdt = get_cpu_gdt_table(smp_processor_id());
18443+ pax_open_kernel();
18444+#ifdef CONFIG_PAX_MEMORY_UDEREF
18445+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18446+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18447+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18448+#endif
18449+#ifdef CONFIG_PAX_KERNEXEC
18450+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18451+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18452+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18453+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18454+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18455+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18456+#endif
18457+ pax_close_kernel();
18458+#endif
18459+
18460+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18461+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18462+ unreachable();
18463+#else
18464 restart_lowmem(type);
18465+#endif
18466+
18467 }
18468 #ifdef CONFIG_APM_MODULE
18469 EXPORT_SYMBOL(machine_real_restart);
18470@@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18471 * try to force a triple fault and then cycle between hitting the keyboard
18472 * controller and doing that
18473 */
18474-static void native_machine_emergency_restart(void)
18475+__noreturn static void native_machine_emergency_restart(void)
18476 {
18477 int i;
18478 int attempt = 0;
18479@@ -680,13 +710,13 @@ void native_machine_shutdown(void)
18480 #endif
18481 }
18482
18483-static void __machine_emergency_restart(int emergency)
18484+static __noreturn void __machine_emergency_restart(int emergency)
18485 {
18486 reboot_emergency = emergency;
18487 machine_ops.emergency_restart();
18488 }
18489
18490-static void native_machine_restart(char *__unused)
18491+static __noreturn void native_machine_restart(char *__unused)
18492 {
18493 printk("machine restart\n");
18494
18495@@ -695,7 +725,7 @@ static void native_machine_restart(char *__unused)
18496 __machine_emergency_restart(0);
18497 }
18498
18499-static void native_machine_halt(void)
18500+static __noreturn void native_machine_halt(void)
18501 {
18502 /* stop other cpus and apics */
18503 machine_shutdown();
18504@@ -706,7 +736,7 @@ static void native_machine_halt(void)
18505 stop_this_cpu(NULL);
18506 }
18507
18508-static void native_machine_power_off(void)
18509+__noreturn static void native_machine_power_off(void)
18510 {
18511 if (pm_power_off) {
18512 if (!reboot_force)
18513@@ -715,6 +745,7 @@ static void native_machine_power_off(void)
18514 }
18515 /* a fallback in case there is no PM info available */
18516 tboot_shutdown(TB_SHUTDOWN_HALT);
18517+ unreachable();
18518 }
18519
18520 struct machine_ops machine_ops = {
18521diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18522index 7a6f3b3..bed145d7 100644
18523--- a/arch/x86/kernel/relocate_kernel_64.S
18524+++ b/arch/x86/kernel/relocate_kernel_64.S
18525@@ -11,6 +11,7 @@
18526 #include <asm/kexec.h>
18527 #include <asm/processor-flags.h>
18528 #include <asm/pgtable_types.h>
18529+#include <asm/alternative-asm.h>
18530
18531 /*
18532 * Must be relocatable PIC code callable as a C function
18533@@ -160,13 +161,14 @@ identity_mapped:
18534 xorq %rbp, %rbp
18535 xorq %r8, %r8
18536 xorq %r9, %r9
18537- xorq %r10, %r9
18538+ xorq %r10, %r10
18539 xorq %r11, %r11
18540 xorq %r12, %r12
18541 xorq %r13, %r13
18542 xorq %r14, %r14
18543 xorq %r15, %r15
18544
18545+ pax_force_retaddr 0, 1
18546 ret
18547
18548 1:
18549diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18550index d7d5099..28555d0 100644
18551--- a/arch/x86/kernel/setup.c
18552+++ b/arch/x86/kernel/setup.c
18553@@ -448,7 +448,7 @@ static void __init parse_setup_data(void)
18554
18555 switch (data->type) {
18556 case SETUP_E820_EXT:
18557- parse_e820_ext(data);
18558+ parse_e820_ext((struct setup_data __force_kernel *)data);
18559 break;
18560 case SETUP_DTB:
18561 add_dtb(pa_data);
18562@@ -649,7 +649,7 @@ static void __init trim_bios_range(void)
18563 * area (640->1Mb) as ram even though it is not.
18564 * take them out.
18565 */
18566- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
18567+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
18568 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
18569 }
18570
18571@@ -767,14 +767,14 @@ void __init setup_arch(char **cmdline_p)
18572
18573 if (!boot_params.hdr.root_flags)
18574 root_mountflags &= ~MS_RDONLY;
18575- init_mm.start_code = (unsigned long) _text;
18576- init_mm.end_code = (unsigned long) _etext;
18577+ init_mm.start_code = ktla_ktva((unsigned long) _text);
18578+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
18579 init_mm.end_data = (unsigned long) _edata;
18580 init_mm.brk = _brk_end;
18581
18582- code_resource.start = virt_to_phys(_text);
18583- code_resource.end = virt_to_phys(_etext)-1;
18584- data_resource.start = virt_to_phys(_etext);
18585+ code_resource.start = virt_to_phys(ktla_ktva(_text));
18586+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
18587+ data_resource.start = virt_to_phys(_sdata);
18588 data_resource.end = virt_to_phys(_edata)-1;
18589 bss_resource.start = virt_to_phys(&__bss_start);
18590 bss_resource.end = virt_to_phys(&__bss_stop)-1;
18591diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
18592index 71f4727..217419b 100644
18593--- a/arch/x86/kernel/setup_percpu.c
18594+++ b/arch/x86/kernel/setup_percpu.c
18595@@ -21,19 +21,17 @@
18596 #include <asm/cpu.h>
18597 #include <asm/stackprotector.h>
18598
18599-DEFINE_PER_CPU(int, cpu_number);
18600+#ifdef CONFIG_SMP
18601+DEFINE_PER_CPU(unsigned int, cpu_number);
18602 EXPORT_PER_CPU_SYMBOL(cpu_number);
18603+#endif
18604
18605-#ifdef CONFIG_X86_64
18606 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
18607-#else
18608-#define BOOT_PERCPU_OFFSET 0
18609-#endif
18610
18611 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
18612 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
18613
18614-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
18615+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
18616 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
18617 };
18618 EXPORT_SYMBOL(__per_cpu_offset);
18619@@ -96,6 +94,8 @@ static bool __init pcpu_need_numa(void)
18620 * Pointer to the allocated area on success, NULL on failure.
18621 */
18622 static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
18623+ unsigned long align) __size_overflow(2);
18624+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
18625 unsigned long align)
18626 {
18627 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
18628@@ -124,6 +124,8 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
18629 /*
18630 * Helpers for first chunk memory allocation
18631 */
18632+static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) __size_overflow(2);
18633+
18634 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
18635 {
18636 return pcpu_alloc_bootmem(cpu, size, align);
18637@@ -155,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
18638 {
18639 #ifdef CONFIG_X86_32
18640 struct desc_struct gdt;
18641+ unsigned long base = per_cpu_offset(cpu);
18642
18643- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
18644- 0x2 | DESCTYPE_S, 0x8);
18645- gdt.s = 1;
18646+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
18647+ 0x83 | DESCTYPE_S, 0xC);
18648 write_gdt_entry(get_cpu_gdt_table(cpu),
18649 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
18650 #endif
18651@@ -207,6 +209,11 @@ void __init setup_per_cpu_areas(void)
18652 /* alrighty, percpu areas up and running */
18653 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
18654 for_each_possible_cpu(cpu) {
18655+#ifdef CONFIG_CC_STACKPROTECTOR
18656+#ifdef CONFIG_X86_32
18657+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
18658+#endif
18659+#endif
18660 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
18661 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
18662 per_cpu(cpu_number, cpu) = cpu;
18663@@ -247,6 +254,12 @@ void __init setup_per_cpu_areas(void)
18664 */
18665 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
18666 #endif
18667+#ifdef CONFIG_CC_STACKPROTECTOR
18668+#ifdef CONFIG_X86_32
18669+ if (!cpu)
18670+ per_cpu(stack_canary.canary, cpu) = canary;
18671+#endif
18672+#endif
18673 /*
18674 * Up to this point, the boot CPU has been using .init.data
18675 * area. Reload any changed state for the boot CPU.
18676diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
18677index 46a01bd..2e88e6d 100644
18678--- a/arch/x86/kernel/signal.c
18679+++ b/arch/x86/kernel/signal.c
18680@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
18681 * Align the stack pointer according to the i386 ABI,
18682 * i.e. so that on function entry ((sp + 4) & 15) == 0.
18683 */
18684- sp = ((sp + 4) & -16ul) - 4;
18685+ sp = ((sp - 12) & -16ul) - 4;
18686 #else /* !CONFIG_X86_32 */
18687 sp = round_down(sp, 16) - 8;
18688 #endif
18689@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
18690 * Return an always-bogus address instead so we will die with SIGSEGV.
18691 */
18692 if (onsigstack && !likely(on_sig_stack(sp)))
18693- return (void __user *)-1L;
18694+ return (__force void __user *)-1L;
18695
18696 /* save i387 state */
18697 if (used_math() && save_i387_xstate(*fpstate) < 0)
18698- return (void __user *)-1L;
18699+ return (__force void __user *)-1L;
18700
18701 return (void __user *)sp;
18702 }
18703@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
18704 }
18705
18706 if (current->mm->context.vdso)
18707- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
18708+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
18709 else
18710- restorer = &frame->retcode;
18711+ restorer = (void __user *)&frame->retcode;
18712 if (ka->sa.sa_flags & SA_RESTORER)
18713 restorer = ka->sa.sa_restorer;
18714
18715@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
18716 * reasons and because gdb uses it as a signature to notice
18717 * signal handler stack frames.
18718 */
18719- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
18720+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
18721
18722 if (err)
18723 return -EFAULT;
18724@@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
18725 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
18726
18727 /* Set up to return from userspace. */
18728- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
18729+ if (current->mm->context.vdso)
18730+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
18731+ else
18732+ restorer = (void __user *)&frame->retcode;
18733 if (ka->sa.sa_flags & SA_RESTORER)
18734 restorer = ka->sa.sa_restorer;
18735 put_user_ex(restorer, &frame->pretcode);
18736@@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
18737 * reasons and because gdb uses it as a signature to notice
18738 * signal handler stack frames.
18739 */
18740- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
18741+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
18742 } put_user_catch(err);
18743
18744 if (err)
18745@@ -765,7 +768,7 @@ static void do_signal(struct pt_regs *regs)
18746 * X86_32: vm86 regs switched out by assembly code before reaching
18747 * here, so testing against kernel CS suffices.
18748 */
18749- if (!user_mode(regs))
18750+ if (!user_mode_novm(regs))
18751 return;
18752
18753 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
18754diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
18755index 66d250c..f1b10bd 100644
18756--- a/arch/x86/kernel/smpboot.c
18757+++ b/arch/x86/kernel/smpboot.c
18758@@ -715,17 +715,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
18759 set_idle_for_cpu(cpu, c_idle.idle);
18760 do_rest:
18761 per_cpu(current_task, cpu) = c_idle.idle;
18762+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
18763 #ifdef CONFIG_X86_32
18764 /* Stack for startup_32 can be just as for start_secondary onwards */
18765 irq_ctx_init(cpu);
18766 #else
18767 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
18768 initial_gs = per_cpu_offset(cpu);
18769- per_cpu(kernel_stack, cpu) =
18770- (unsigned long)task_stack_page(c_idle.idle) -
18771- KERNEL_STACK_OFFSET + THREAD_SIZE;
18772+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
18773 #endif
18774+
18775+ pax_open_kernel();
18776 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
18777+ pax_close_kernel();
18778+
18779 initial_code = (unsigned long)start_secondary;
18780 stack_start = c_idle.idle->thread.sp;
18781
18782@@ -868,6 +871,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
18783
18784 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
18785
18786+#ifdef CONFIG_PAX_PER_CPU_PGD
18787+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
18788+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18789+ KERNEL_PGD_PTRS);
18790+#endif
18791+
18792 err = do_boot_cpu(apicid, cpu);
18793 if (err) {
18794 pr_debug("do_boot_cpu failed %d\n", err);
18795diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
18796index c346d11..d43b163 100644
18797--- a/arch/x86/kernel/step.c
18798+++ b/arch/x86/kernel/step.c
18799@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
18800 struct desc_struct *desc;
18801 unsigned long base;
18802
18803- seg &= ~7UL;
18804+ seg >>= 3;
18805
18806 mutex_lock(&child->mm->context.lock);
18807- if (unlikely((seg >> 3) >= child->mm->context.size))
18808+ if (unlikely(seg >= child->mm->context.size))
18809 addr = -1L; /* bogus selector, access would fault */
18810 else {
18811 desc = child->mm->context.ldt + seg;
18812@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
18813 addr += base;
18814 }
18815 mutex_unlock(&child->mm->context.lock);
18816- }
18817+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
18818+ addr = ktla_ktva(addr);
18819
18820 return addr;
18821 }
18822@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
18823 unsigned char opcode[15];
18824 unsigned long addr = convert_ip_to_linear(child, regs);
18825
18826+ if (addr == -EINVAL)
18827+ return 0;
18828+
18829 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
18830 for (i = 0; i < copied; i++) {
18831 switch (opcode[i]) {
18832diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
18833index 0b0cb5f..db6b9ed 100644
18834--- a/arch/x86/kernel/sys_i386_32.c
18835+++ b/arch/x86/kernel/sys_i386_32.c
18836@@ -24,17 +24,224 @@
18837
18838 #include <asm/syscalls.h>
18839
18840-/*
18841- * Do a system call from kernel instead of calling sys_execve so we
18842- * end up with proper pt_regs.
18843- */
18844-int kernel_execve(const char *filename,
18845- const char *const argv[],
18846- const char *const envp[])
18847+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
18848 {
18849- long __res;
18850- asm volatile ("int $0x80"
18851- : "=a" (__res)
18852- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
18853- return __res;
18854+ unsigned long pax_task_size = TASK_SIZE;
18855+
18856+#ifdef CONFIG_PAX_SEGMEXEC
18857+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
18858+ pax_task_size = SEGMEXEC_TASK_SIZE;
18859+#endif
18860+
18861+ if (len > pax_task_size || addr > pax_task_size - len)
18862+ return -EINVAL;
18863+
18864+ return 0;
18865+}
18866+
18867+unsigned long
18868+arch_get_unmapped_area(struct file *filp, unsigned long addr,
18869+ unsigned long len, unsigned long pgoff, unsigned long flags)
18870+{
18871+ struct mm_struct *mm = current->mm;
18872+ struct vm_area_struct *vma;
18873+ unsigned long start_addr, pax_task_size = TASK_SIZE;
18874+
18875+#ifdef CONFIG_PAX_SEGMEXEC
18876+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
18877+ pax_task_size = SEGMEXEC_TASK_SIZE;
18878+#endif
18879+
18880+ pax_task_size -= PAGE_SIZE;
18881+
18882+ if (len > pax_task_size)
18883+ return -ENOMEM;
18884+
18885+ if (flags & MAP_FIXED)
18886+ return addr;
18887+
18888+#ifdef CONFIG_PAX_RANDMMAP
18889+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18890+#endif
18891+
18892+ if (addr) {
18893+ addr = PAGE_ALIGN(addr);
18894+ if (pax_task_size - len >= addr) {
18895+ vma = find_vma(mm, addr);
18896+ if (check_heap_stack_gap(vma, addr, len))
18897+ return addr;
18898+ }
18899+ }
18900+ if (len > mm->cached_hole_size) {
18901+ start_addr = addr = mm->free_area_cache;
18902+ } else {
18903+ start_addr = addr = mm->mmap_base;
18904+ mm->cached_hole_size = 0;
18905+ }
18906+
18907+#ifdef CONFIG_PAX_PAGEEXEC
18908+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
18909+ start_addr = 0x00110000UL;
18910+
18911+#ifdef CONFIG_PAX_RANDMMAP
18912+ if (mm->pax_flags & MF_PAX_RANDMMAP)
18913+ start_addr += mm->delta_mmap & 0x03FFF000UL;
18914+#endif
18915+
18916+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
18917+ start_addr = addr = mm->mmap_base;
18918+ else
18919+ addr = start_addr;
18920+ }
18921+#endif
18922+
18923+full_search:
18924+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
18925+ /* At this point: (!vma || addr < vma->vm_end). */
18926+ if (pax_task_size - len < addr) {
18927+ /*
18928+ * Start a new search - just in case we missed
18929+ * some holes.
18930+ */
18931+ if (start_addr != mm->mmap_base) {
18932+ start_addr = addr = mm->mmap_base;
18933+ mm->cached_hole_size = 0;
18934+ goto full_search;
18935+ }
18936+ return -ENOMEM;
18937+ }
18938+ if (check_heap_stack_gap(vma, addr, len))
18939+ break;
18940+ if (addr + mm->cached_hole_size < vma->vm_start)
18941+ mm->cached_hole_size = vma->vm_start - addr;
18942+ addr = vma->vm_end;
18943+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
18944+ start_addr = addr = mm->mmap_base;
18945+ mm->cached_hole_size = 0;
18946+ goto full_search;
18947+ }
18948+ }
18949+
18950+ /*
18951+ * Remember the place where we stopped the search:
18952+ */
18953+ mm->free_area_cache = addr + len;
18954+ return addr;
18955+}
18956+
18957+unsigned long
18958+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
18959+ const unsigned long len, const unsigned long pgoff,
18960+ const unsigned long flags)
18961+{
18962+ struct vm_area_struct *vma;
18963+ struct mm_struct *mm = current->mm;
18964+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
18965+
18966+#ifdef CONFIG_PAX_SEGMEXEC
18967+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
18968+ pax_task_size = SEGMEXEC_TASK_SIZE;
18969+#endif
18970+
18971+ pax_task_size -= PAGE_SIZE;
18972+
18973+ /* requested length too big for entire address space */
18974+ if (len > pax_task_size)
18975+ return -ENOMEM;
18976+
18977+ if (flags & MAP_FIXED)
18978+ return addr;
18979+
18980+#ifdef CONFIG_PAX_PAGEEXEC
18981+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
18982+ goto bottomup;
18983+#endif
18984+
18985+#ifdef CONFIG_PAX_RANDMMAP
18986+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
18987+#endif
18988+
18989+ /* requesting a specific address */
18990+ if (addr) {
18991+ addr = PAGE_ALIGN(addr);
18992+ if (pax_task_size - len >= addr) {
18993+ vma = find_vma(mm, addr);
18994+ if (check_heap_stack_gap(vma, addr, len))
18995+ return addr;
18996+ }
18997+ }
18998+
18999+ /* check if free_area_cache is useful for us */
19000+ if (len <= mm->cached_hole_size) {
19001+ mm->cached_hole_size = 0;
19002+ mm->free_area_cache = mm->mmap_base;
19003+ }
19004+
19005+ /* either no address requested or can't fit in requested address hole */
19006+ addr = mm->free_area_cache;
19007+
19008+ /* make sure it can fit in the remaining address space */
19009+ if (addr > len) {
19010+ vma = find_vma(mm, addr-len);
19011+ if (check_heap_stack_gap(vma, addr - len, len))
19012+ /* remember the address as a hint for next time */
19013+ return (mm->free_area_cache = addr-len);
19014+ }
19015+
19016+ if (mm->mmap_base < len)
19017+ goto bottomup;
19018+
19019+ addr = mm->mmap_base-len;
19020+
19021+ do {
19022+ /*
19023+ * Lookup failure means no vma is above this address,
19024+ * else if new region fits below vma->vm_start,
19025+ * return with success:
19026+ */
19027+ vma = find_vma(mm, addr);
19028+ if (check_heap_stack_gap(vma, addr, len))
19029+ /* remember the address as a hint for next time */
19030+ return (mm->free_area_cache = addr);
19031+
19032+ /* remember the largest hole we saw so far */
19033+ if (addr + mm->cached_hole_size < vma->vm_start)
19034+ mm->cached_hole_size = vma->vm_start - addr;
19035+
19036+ /* try just below the current vma->vm_start */
19037+ addr = skip_heap_stack_gap(vma, len);
19038+ } while (!IS_ERR_VALUE(addr));
19039+
19040+bottomup:
19041+ /*
19042+ * A failed mmap() very likely causes application failure,
19043+ * so fall back to the bottom-up function here. This scenario
19044+ * can happen with large stack limits and large mmap()
19045+ * allocations.
19046+ */
19047+
19048+#ifdef CONFIG_PAX_SEGMEXEC
19049+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19050+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19051+ else
19052+#endif
19053+
19054+ mm->mmap_base = TASK_UNMAPPED_BASE;
19055+
19056+#ifdef CONFIG_PAX_RANDMMAP
19057+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19058+ mm->mmap_base += mm->delta_mmap;
19059+#endif
19060+
19061+ mm->free_area_cache = mm->mmap_base;
19062+ mm->cached_hole_size = ~0UL;
19063+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19064+ /*
19065+ * Restore the topdown base:
19066+ */
19067+ mm->mmap_base = base;
19068+ mm->free_area_cache = base;
19069+ mm->cached_hole_size = ~0UL;
19070+
19071+ return addr;
19072 }
19073diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19074index 0514890..3dbebce 100644
19075--- a/arch/x86/kernel/sys_x86_64.c
19076+++ b/arch/x86/kernel/sys_x86_64.c
19077@@ -95,8 +95,8 @@ out:
19078 return error;
19079 }
19080
19081-static void find_start_end(unsigned long flags, unsigned long *begin,
19082- unsigned long *end)
19083+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19084+ unsigned long *begin, unsigned long *end)
19085 {
19086 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19087 unsigned long new_begin;
19088@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19089 *begin = new_begin;
19090 }
19091 } else {
19092- *begin = TASK_UNMAPPED_BASE;
19093+ *begin = mm->mmap_base;
19094 *end = TASK_SIZE;
19095 }
19096 }
19097@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19098 if (flags & MAP_FIXED)
19099 return addr;
19100
19101- find_start_end(flags, &begin, &end);
19102+ find_start_end(mm, flags, &begin, &end);
19103
19104 if (len > end)
19105 return -ENOMEM;
19106
19107+#ifdef CONFIG_PAX_RANDMMAP
19108+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19109+#endif
19110+
19111 if (addr) {
19112 addr = PAGE_ALIGN(addr);
19113 vma = find_vma(mm, addr);
19114- if (end - len >= addr &&
19115- (!vma || addr + len <= vma->vm_start))
19116+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19117 return addr;
19118 }
19119 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19120@@ -172,7 +175,7 @@ full_search:
19121 }
19122 return -ENOMEM;
19123 }
19124- if (!vma || addr + len <= vma->vm_start) {
19125+ if (check_heap_stack_gap(vma, addr, len)) {
19126 /*
19127 * Remember the place where we stopped the search:
19128 */
19129@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19130 {
19131 struct vm_area_struct *vma;
19132 struct mm_struct *mm = current->mm;
19133- unsigned long addr = addr0;
19134+ unsigned long base = mm->mmap_base, addr = addr0;
19135
19136 /* requested length too big for entire address space */
19137 if (len > TASK_SIZE)
19138@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19139 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19140 goto bottomup;
19141
19142+#ifdef CONFIG_PAX_RANDMMAP
19143+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19144+#endif
19145+
19146 /* requesting a specific address */
19147 if (addr) {
19148 addr = PAGE_ALIGN(addr);
19149- vma = find_vma(mm, addr);
19150- if (TASK_SIZE - len >= addr &&
19151- (!vma || addr + len <= vma->vm_start))
19152- return addr;
19153+ if (TASK_SIZE - len >= addr) {
19154+ vma = find_vma(mm, addr);
19155+ if (check_heap_stack_gap(vma, addr, len))
19156+ return addr;
19157+ }
19158 }
19159
19160 /* check if free_area_cache is useful for us */
19161@@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19162 ALIGN_TOPDOWN);
19163
19164 vma = find_vma(mm, tmp_addr);
19165- if (!vma || tmp_addr + len <= vma->vm_start)
19166+ if (check_heap_stack_gap(vma, tmp_addr, len))
19167 /* remember the address as a hint for next time */
19168 return mm->free_area_cache = tmp_addr;
19169 }
19170@@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19171 * return with success:
19172 */
19173 vma = find_vma(mm, addr);
19174- if (!vma || addr+len <= vma->vm_start)
19175+ if (check_heap_stack_gap(vma, addr, len))
19176 /* remember the address as a hint for next time */
19177 return mm->free_area_cache = addr;
19178
19179@@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19180 mm->cached_hole_size = vma->vm_start - addr;
19181
19182 /* try just below the current vma->vm_start */
19183- addr = vma->vm_start-len;
19184- } while (len < vma->vm_start);
19185+ addr = skip_heap_stack_gap(vma, len);
19186+ } while (!IS_ERR_VALUE(addr));
19187
19188 bottomup:
19189 /*
19190@@ -270,13 +278,21 @@ bottomup:
19191 * can happen with large stack limits and large mmap()
19192 * allocations.
19193 */
19194+ mm->mmap_base = TASK_UNMAPPED_BASE;
19195+
19196+#ifdef CONFIG_PAX_RANDMMAP
19197+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19198+ mm->mmap_base += mm->delta_mmap;
19199+#endif
19200+
19201+ mm->free_area_cache = mm->mmap_base;
19202 mm->cached_hole_size = ~0UL;
19203- mm->free_area_cache = TASK_UNMAPPED_BASE;
19204 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19205 /*
19206 * Restore the topdown base:
19207 */
19208- mm->free_area_cache = mm->mmap_base;
19209+ mm->mmap_base = base;
19210+ mm->free_area_cache = base;
19211 mm->cached_hole_size = ~0UL;
19212
19213 return addr;
19214diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19215index e2410e2..4fe3fbc 100644
19216--- a/arch/x86/kernel/tboot.c
19217+++ b/arch/x86/kernel/tboot.c
19218@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
19219
19220 void tboot_shutdown(u32 shutdown_type)
19221 {
19222- void (*shutdown)(void);
19223+ void (* __noreturn shutdown)(void);
19224
19225 if (!tboot_enabled())
19226 return;
19227@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
19228
19229 switch_to_tboot_pt();
19230
19231- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19232+ shutdown = (void *)tboot->shutdown_entry;
19233 shutdown();
19234
19235 /* should not reach here */
19236@@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19237 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19238 }
19239
19240-static atomic_t ap_wfs_count;
19241+static atomic_unchecked_t ap_wfs_count;
19242
19243 static int tboot_wait_for_aps(int num_aps)
19244 {
19245@@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19246 {
19247 switch (action) {
19248 case CPU_DYING:
19249- atomic_inc(&ap_wfs_count);
19250+ atomic_inc_unchecked(&ap_wfs_count);
19251 if (num_online_cpus() == 1)
19252- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19253+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19254 return NOTIFY_BAD;
19255 break;
19256 }
19257@@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
19258
19259 tboot_create_trampoline();
19260
19261- atomic_set(&ap_wfs_count, 0);
19262+ atomic_set_unchecked(&ap_wfs_count, 0);
19263 register_hotcpu_notifier(&tboot_cpu_notifier);
19264 return 0;
19265 }
19266diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19267index dd5fbf4..b7f2232 100644
19268--- a/arch/x86/kernel/time.c
19269+++ b/arch/x86/kernel/time.c
19270@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19271 {
19272 unsigned long pc = instruction_pointer(regs);
19273
19274- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19275+ if (!user_mode(regs) && in_lock_functions(pc)) {
19276 #ifdef CONFIG_FRAME_POINTER
19277- return *(unsigned long *)(regs->bp + sizeof(long));
19278+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19279 #else
19280 unsigned long *sp =
19281 (unsigned long *)kernel_stack_pointer(regs);
19282@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19283 * or above a saved flags. Eflags has bits 22-31 zero,
19284 * kernel addresses don't.
19285 */
19286+
19287+#ifdef CONFIG_PAX_KERNEXEC
19288+ return ktla_ktva(sp[0]);
19289+#else
19290 if (sp[0] >> 22)
19291 return sp[0];
19292 if (sp[1] >> 22)
19293 return sp[1];
19294 #endif
19295+
19296+#endif
19297 }
19298 return pc;
19299 }
19300diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19301index bcfec2d..8f88b4a 100644
19302--- a/arch/x86/kernel/tls.c
19303+++ b/arch/x86/kernel/tls.c
19304@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19305 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19306 return -EINVAL;
19307
19308+#ifdef CONFIG_PAX_SEGMEXEC
19309+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19310+ return -EINVAL;
19311+#endif
19312+
19313 set_tls_desc(p, idx, &info, 1);
19314
19315 return 0;
19316diff --git a/arch/x86/kernel/tls.h b/arch/x86/kernel/tls.h
19317index 2f083a2..7d3fecc 100644
19318--- a/arch/x86/kernel/tls.h
19319+++ b/arch/x86/kernel/tls.h
19320@@ -16,6 +16,6 @@
19321
19322 extern user_regset_active_fn regset_tls_active;
19323 extern user_regset_get_fn regset_tls_get;
19324-extern user_regset_set_fn regset_tls_set;
19325+extern user_regset_set_fn regset_tls_set __size_overflow(4);
19326
19327 #endif /* _ARCH_X86_KERNEL_TLS_H */
19328diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19329index 451c0a7..e57f551 100644
19330--- a/arch/x86/kernel/trampoline_32.S
19331+++ b/arch/x86/kernel/trampoline_32.S
19332@@ -32,6 +32,12 @@
19333 #include <asm/segment.h>
19334 #include <asm/page_types.h>
19335
19336+#ifdef CONFIG_PAX_KERNEXEC
19337+#define ta(X) (X)
19338+#else
19339+#define ta(X) ((X) - __PAGE_OFFSET)
19340+#endif
19341+
19342 #ifdef CONFIG_SMP
19343
19344 .section ".x86_trampoline","a"
19345@@ -62,7 +68,7 @@ r_base = .
19346 inc %ax # protected mode (PE) bit
19347 lmsw %ax # into protected mode
19348 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19349- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19350+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
19351
19352 # These need to be in the same 64K segment as the above;
19353 # hence we don't use the boot_gdt_descr defined in head.S
19354diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19355index 09ff517..df19fbff 100644
19356--- a/arch/x86/kernel/trampoline_64.S
19357+++ b/arch/x86/kernel/trampoline_64.S
19358@@ -90,7 +90,7 @@ startup_32:
19359 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19360 movl %eax, %ds
19361
19362- movl $X86_CR4_PAE, %eax
19363+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19364 movl %eax, %cr4 # Enable PAE mode
19365
19366 # Setup trampoline 4 level pagetables
19367@@ -138,7 +138,7 @@ tidt:
19368 # so the kernel can live anywhere
19369 .balign 4
19370 tgdt:
19371- .short tgdt_end - tgdt # gdt limit
19372+ .short tgdt_end - tgdt - 1 # gdt limit
19373 .long tgdt - r_base
19374 .short 0
19375 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19376diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19377index 4bbe04d..41d0943 100644
19378--- a/arch/x86/kernel/traps.c
19379+++ b/arch/x86/kernel/traps.c
19380@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19381
19382 /* Do we ignore FPU interrupts ? */
19383 char ignore_fpu_irq;
19384-
19385-/*
19386- * The IDT has to be page-aligned to simplify the Pentium
19387- * F0 0F bug workaround.
19388- */
19389-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19390 #endif
19391
19392 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19393@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19394 }
19395
19396 static void __kprobes
19397-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19398+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19399 long error_code, siginfo_t *info)
19400 {
19401 struct task_struct *tsk = current;
19402
19403 #ifdef CONFIG_X86_32
19404- if (regs->flags & X86_VM_MASK) {
19405+ if (v8086_mode(regs)) {
19406 /*
19407 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19408 * On nmi (interrupt 2), do_trap should not be called.
19409@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19410 }
19411 #endif
19412
19413- if (!user_mode(regs))
19414+ if (!user_mode_novm(regs))
19415 goto kernel_trap;
19416
19417 #ifdef CONFIG_X86_32
19418@@ -148,7 +142,7 @@ trap_signal:
19419 printk_ratelimit()) {
19420 printk(KERN_INFO
19421 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19422- tsk->comm, tsk->pid, str,
19423+ tsk->comm, task_pid_nr(tsk), str,
19424 regs->ip, regs->sp, error_code);
19425 print_vma_addr(" in ", regs->ip);
19426 printk("\n");
19427@@ -165,8 +159,20 @@ kernel_trap:
19428 if (!fixup_exception(regs)) {
19429 tsk->thread.error_code = error_code;
19430 tsk->thread.trap_no = trapnr;
19431+
19432+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19433+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19434+ str = "PAX: suspicious stack segment fault";
19435+#endif
19436+
19437 die(str, regs, error_code);
19438 }
19439+
19440+#ifdef CONFIG_PAX_REFCOUNT
19441+ if (trapnr == 4)
19442+ pax_report_refcount_overflow(regs);
19443+#endif
19444+
19445 return;
19446
19447 #ifdef CONFIG_X86_32
19448@@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19449 conditional_sti(regs);
19450
19451 #ifdef CONFIG_X86_32
19452- if (regs->flags & X86_VM_MASK)
19453+ if (v8086_mode(regs))
19454 goto gp_in_vm86;
19455 #endif
19456
19457 tsk = current;
19458- if (!user_mode(regs))
19459+ if (!user_mode_novm(regs))
19460 goto gp_in_kernel;
19461
19462+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19463+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19464+ struct mm_struct *mm = tsk->mm;
19465+ unsigned long limit;
19466+
19467+ down_write(&mm->mmap_sem);
19468+ limit = mm->context.user_cs_limit;
19469+ if (limit < TASK_SIZE) {
19470+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19471+ up_write(&mm->mmap_sem);
19472+ return;
19473+ }
19474+ up_write(&mm->mmap_sem);
19475+ }
19476+#endif
19477+
19478 tsk->thread.error_code = error_code;
19479 tsk->thread.trap_no = 13;
19480
19481@@ -295,6 +317,13 @@ gp_in_kernel:
19482 if (notify_die(DIE_GPF, "general protection fault", regs,
19483 error_code, 13, SIGSEGV) == NOTIFY_STOP)
19484 return;
19485+
19486+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19487+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19488+ die("PAX: suspicious general protection fault", regs, error_code);
19489+ else
19490+#endif
19491+
19492 die("general protection fault", regs, error_code);
19493 }
19494
19495@@ -421,7 +450,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19496 /* It's safe to allow irq's after DR6 has been saved */
19497 preempt_conditional_sti(regs);
19498
19499- if (regs->flags & X86_VM_MASK) {
19500+ if (v8086_mode(regs)) {
19501 handle_vm86_trap((struct kernel_vm86_regs *) regs,
19502 error_code, 1);
19503 preempt_conditional_cli(regs);
19504@@ -436,7 +465,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19505 * We already checked v86 mode above, so we can check for kernel mode
19506 * by just checking the CPL of CS.
19507 */
19508- if ((dr6 & DR_STEP) && !user_mode(regs)) {
19509+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19510 tsk->thread.debugreg6 &= ~DR_STEP;
19511 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19512 regs->flags &= ~X86_EFLAGS_TF;
19513@@ -466,7 +495,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19514 return;
19515 conditional_sti(regs);
19516
19517- if (!user_mode_vm(regs))
19518+ if (!user_mode(regs))
19519 {
19520 if (!fixup_exception(regs)) {
19521 task->thread.error_code = error_code;
19522diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19523index b9242ba..50c5edd 100644
19524--- a/arch/x86/kernel/verify_cpu.S
19525+++ b/arch/x86/kernel/verify_cpu.S
19526@@ -20,6 +20,7 @@
19527 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19528 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19529 * arch/x86/kernel/head_32.S: processor startup
19530+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19531 *
19532 * verify_cpu, returns the status of longmode and SSE in register %eax.
19533 * 0: Success 1: Failure
19534diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19535index 328cb37..f37fee1 100644
19536--- a/arch/x86/kernel/vm86_32.c
19537+++ b/arch/x86/kernel/vm86_32.c
19538@@ -41,6 +41,7 @@
19539 #include <linux/ptrace.h>
19540 #include <linux/audit.h>
19541 #include <linux/stddef.h>
19542+#include <linux/grsecurity.h>
19543
19544 #include <asm/uaccess.h>
19545 #include <asm/io.h>
19546@@ -109,6 +110,9 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
19547 /* convert vm86_regs to kernel_vm86_regs */
19548 static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
19549 const struct vm86_regs __user *user,
19550+ unsigned extra) __size_overflow(3);
19551+static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
19552+ const struct vm86_regs __user *user,
19553 unsigned extra)
19554 {
19555 int ret = 0;
19556@@ -148,7 +152,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19557 do_exit(SIGSEGV);
19558 }
19559
19560- tss = &per_cpu(init_tss, get_cpu());
19561+ tss = init_tss + get_cpu();
19562 current->thread.sp0 = current->thread.saved_sp0;
19563 current->thread.sysenter_cs = __KERNEL_CS;
19564 load_sp0(tss, &current->thread);
19565@@ -210,6 +214,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19566 struct task_struct *tsk;
19567 int tmp, ret = -EPERM;
19568
19569+#ifdef CONFIG_GRKERNSEC_VM86
19570+ if (!capable(CAP_SYS_RAWIO)) {
19571+ gr_handle_vm86();
19572+ goto out;
19573+ }
19574+#endif
19575+
19576 tsk = current;
19577 if (tsk->thread.saved_sp0)
19578 goto out;
19579@@ -240,6 +251,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19580 int tmp, ret;
19581 struct vm86plus_struct __user *v86;
19582
19583+#ifdef CONFIG_GRKERNSEC_VM86
19584+ if (!capable(CAP_SYS_RAWIO)) {
19585+ gr_handle_vm86();
19586+ ret = -EPERM;
19587+ goto out;
19588+ }
19589+#endif
19590+
19591 tsk = current;
19592 switch (cmd) {
19593 case VM86_REQUEST_IRQ:
19594@@ -326,7 +345,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19595 tsk->thread.saved_fs = info->regs32->fs;
19596 tsk->thread.saved_gs = get_user_gs(info->regs32);
19597
19598- tss = &per_cpu(init_tss, get_cpu());
19599+ tss = init_tss + get_cpu();
19600 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19601 if (cpu_has_sep)
19602 tsk->thread.sysenter_cs = 0;
19603@@ -533,7 +552,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19604 goto cannot_handle;
19605 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19606 goto cannot_handle;
19607- intr_ptr = (unsigned long __user *) (i << 2);
19608+ intr_ptr = (__force unsigned long __user *) (i << 2);
19609 if (get_user(segoffs, intr_ptr))
19610 goto cannot_handle;
19611 if ((segoffs >> 16) == BIOSSEG)
19612diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
19613index 0f703f1..9e15f64 100644
19614--- a/arch/x86/kernel/vmlinux.lds.S
19615+++ b/arch/x86/kernel/vmlinux.lds.S
19616@@ -26,6 +26,13 @@
19617 #include <asm/page_types.h>
19618 #include <asm/cache.h>
19619 #include <asm/boot.h>
19620+#include <asm/segment.h>
19621+
19622+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19623+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
19624+#else
19625+#define __KERNEL_TEXT_OFFSET 0
19626+#endif
19627
19628 #undef i386 /* in case the preprocessor is a 32bit one */
19629
19630@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
19631
19632 PHDRS {
19633 text PT_LOAD FLAGS(5); /* R_E */
19634+#ifdef CONFIG_X86_32
19635+ module PT_LOAD FLAGS(5); /* R_E */
19636+#endif
19637+#ifdef CONFIG_XEN
19638+ rodata PT_LOAD FLAGS(5); /* R_E */
19639+#else
19640+ rodata PT_LOAD FLAGS(4); /* R__ */
19641+#endif
19642 data PT_LOAD FLAGS(6); /* RW_ */
19643-#ifdef CONFIG_X86_64
19644+ init.begin PT_LOAD FLAGS(6); /* RW_ */
19645 #ifdef CONFIG_SMP
19646 percpu PT_LOAD FLAGS(6); /* RW_ */
19647 #endif
19648+ text.init PT_LOAD FLAGS(5); /* R_E */
19649+ text.exit PT_LOAD FLAGS(5); /* R_E */
19650 init PT_LOAD FLAGS(7); /* RWE */
19651-#endif
19652 note PT_NOTE FLAGS(0); /* ___ */
19653 }
19654
19655 SECTIONS
19656 {
19657 #ifdef CONFIG_X86_32
19658- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
19659- phys_startup_32 = startup_32 - LOAD_OFFSET;
19660+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
19661 #else
19662- . = __START_KERNEL;
19663- phys_startup_64 = startup_64 - LOAD_OFFSET;
19664+ . = __START_KERNEL;
19665 #endif
19666
19667 /* Text and read-only data */
19668- .text : AT(ADDR(.text) - LOAD_OFFSET) {
19669- _text = .;
19670+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19671 /* bootstrapping code */
19672+#ifdef CONFIG_X86_32
19673+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19674+#else
19675+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19676+#endif
19677+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19678+ _text = .;
19679 HEAD_TEXT
19680 #ifdef CONFIG_X86_32
19681 . = ALIGN(PAGE_SIZE);
19682@@ -108,13 +128,47 @@ SECTIONS
19683 IRQENTRY_TEXT
19684 *(.fixup)
19685 *(.gnu.warning)
19686- /* End of text section */
19687- _etext = .;
19688 } :text = 0x9090
19689
19690- NOTES :text :note
19691+ . += __KERNEL_TEXT_OFFSET;
19692
19693- EXCEPTION_TABLE(16) :text = 0x9090
19694+#ifdef CONFIG_X86_32
19695+ . = ALIGN(PAGE_SIZE);
19696+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
19697+
19698+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
19699+ MODULES_EXEC_VADDR = .;
19700+ BYTE(0)
19701+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
19702+ . = ALIGN(HPAGE_SIZE);
19703+ MODULES_EXEC_END = . - 1;
19704+#endif
19705+
19706+ } :module
19707+#endif
19708+
19709+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
19710+ /* End of text section */
19711+ _etext = . - __KERNEL_TEXT_OFFSET;
19712+ }
19713+
19714+#ifdef CONFIG_X86_32
19715+ . = ALIGN(PAGE_SIZE);
19716+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
19717+ *(.idt)
19718+ . = ALIGN(PAGE_SIZE);
19719+ *(.empty_zero_page)
19720+ *(.initial_pg_fixmap)
19721+ *(.initial_pg_pmd)
19722+ *(.initial_page_table)
19723+ *(.swapper_pg_dir)
19724+ } :rodata
19725+#endif
19726+
19727+ . = ALIGN(PAGE_SIZE);
19728+ NOTES :rodata :note
19729+
19730+ EXCEPTION_TABLE(16) :rodata
19731
19732 #if defined(CONFIG_DEBUG_RODATA)
19733 /* .text should occupy whole number of pages */
19734@@ -126,16 +180,20 @@ SECTIONS
19735
19736 /* Data */
19737 .data : AT(ADDR(.data) - LOAD_OFFSET) {
19738+
19739+#ifdef CONFIG_PAX_KERNEXEC
19740+ . = ALIGN(HPAGE_SIZE);
19741+#else
19742+ . = ALIGN(PAGE_SIZE);
19743+#endif
19744+
19745 /* Start of data section */
19746 _sdata = .;
19747
19748 /* init_task */
19749 INIT_TASK_DATA(THREAD_SIZE)
19750
19751-#ifdef CONFIG_X86_32
19752- /* 32 bit has nosave before _edata */
19753 NOSAVE_DATA
19754-#endif
19755
19756 PAGE_ALIGNED_DATA(PAGE_SIZE)
19757
19758@@ -176,12 +234,19 @@ SECTIONS
19759 #endif /* CONFIG_X86_64 */
19760
19761 /* Init code and data - will be freed after init */
19762- . = ALIGN(PAGE_SIZE);
19763 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
19764+ BYTE(0)
19765+
19766+#ifdef CONFIG_PAX_KERNEXEC
19767+ . = ALIGN(HPAGE_SIZE);
19768+#else
19769+ . = ALIGN(PAGE_SIZE);
19770+#endif
19771+
19772 __init_begin = .; /* paired with __init_end */
19773- }
19774+ } :init.begin
19775
19776-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
19777+#ifdef CONFIG_SMP
19778 /*
19779 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
19780 * output PHDR, so the next output section - .init.text - should
19781@@ -190,12 +255,27 @@ SECTIONS
19782 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
19783 #endif
19784
19785- INIT_TEXT_SECTION(PAGE_SIZE)
19786-#ifdef CONFIG_X86_64
19787- :init
19788-#endif
19789+ . = ALIGN(PAGE_SIZE);
19790+ init_begin = .;
19791+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
19792+ VMLINUX_SYMBOL(_sinittext) = .;
19793+ INIT_TEXT
19794+ VMLINUX_SYMBOL(_einittext) = .;
19795+ . = ALIGN(PAGE_SIZE);
19796+ } :text.init
19797
19798- INIT_DATA_SECTION(16)
19799+ /*
19800+ * .exit.text is discard at runtime, not link time, to deal with
19801+ * references from .altinstructions and .eh_frame
19802+ */
19803+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19804+ EXIT_TEXT
19805+ . = ALIGN(16);
19806+ } :text.exit
19807+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
19808+
19809+ . = ALIGN(PAGE_SIZE);
19810+ INIT_DATA_SECTION(16) :init
19811
19812 /*
19813 * Code and data for a variety of lowlevel trampolines, to be
19814@@ -269,19 +349,12 @@ SECTIONS
19815 }
19816
19817 . = ALIGN(8);
19818- /*
19819- * .exit.text is discard at runtime, not link time, to deal with
19820- * references from .altinstructions and .eh_frame
19821- */
19822- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
19823- EXIT_TEXT
19824- }
19825
19826 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
19827 EXIT_DATA
19828 }
19829
19830-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
19831+#ifndef CONFIG_SMP
19832 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
19833 #endif
19834
19835@@ -300,16 +373,10 @@ SECTIONS
19836 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
19837 __smp_locks = .;
19838 *(.smp_locks)
19839- . = ALIGN(PAGE_SIZE);
19840 __smp_locks_end = .;
19841+ . = ALIGN(PAGE_SIZE);
19842 }
19843
19844-#ifdef CONFIG_X86_64
19845- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
19846- NOSAVE_DATA
19847- }
19848-#endif
19849-
19850 /* BSS */
19851 . = ALIGN(PAGE_SIZE);
19852 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
19853@@ -325,6 +392,7 @@ SECTIONS
19854 __brk_base = .;
19855 . += 64 * 1024; /* 64k alignment slop space */
19856 *(.brk_reservation) /* areas brk users have reserved */
19857+ . = ALIGN(HPAGE_SIZE);
19858 __brk_limit = .;
19859 }
19860
19861@@ -351,13 +419,12 @@ SECTIONS
19862 * for the boot processor.
19863 */
19864 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
19865-INIT_PER_CPU(gdt_page);
19866 INIT_PER_CPU(irq_stack_union);
19867
19868 /*
19869 * Build-time check on the image size:
19870 */
19871-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
19872+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
19873 "kernel image bigger than KERNEL_IMAGE_SIZE");
19874
19875 #ifdef CONFIG_SMP
19876diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
19877index b07ba93..a212969 100644
19878--- a/arch/x86/kernel/vsyscall_64.c
19879+++ b/arch/x86/kernel/vsyscall_64.c
19880@@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
19881 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
19882 };
19883
19884-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
19885+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
19886
19887 static int __init vsyscall_setup(char *str)
19888 {
19889 if (str) {
19890 if (!strcmp("emulate", str))
19891 vsyscall_mode = EMULATE;
19892- else if (!strcmp("native", str))
19893- vsyscall_mode = NATIVE;
19894 else if (!strcmp("none", str))
19895 vsyscall_mode = NONE;
19896 else
19897@@ -207,7 +205,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
19898
19899 tsk = current;
19900 if (seccomp_mode(&tsk->seccomp))
19901- do_exit(SIGKILL);
19902+ do_group_exit(SIGKILL);
19903
19904 /*
19905 * With a real vsyscall, page faults cause SIGSEGV. We want to
19906@@ -279,8 +277,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
19907 return true;
19908
19909 sigsegv:
19910- force_sig(SIGSEGV, current);
19911- return true;
19912+ do_group_exit(SIGKILL);
19913 }
19914
19915 /*
19916@@ -333,10 +330,7 @@ void __init map_vsyscall(void)
19917 extern char __vvar_page;
19918 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
19919
19920- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
19921- vsyscall_mode == NATIVE
19922- ? PAGE_KERNEL_VSYSCALL
19923- : PAGE_KERNEL_VVAR);
19924+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
19925 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
19926 (unsigned long)VSYSCALL_START);
19927
19928diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
19929index 9796c2f..f686fbf 100644
19930--- a/arch/x86/kernel/x8664_ksyms_64.c
19931+++ b/arch/x86/kernel/x8664_ksyms_64.c
19932@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
19933 EXPORT_SYMBOL(copy_user_generic_string);
19934 EXPORT_SYMBOL(copy_user_generic_unrolled);
19935 EXPORT_SYMBOL(__copy_user_nocache);
19936-EXPORT_SYMBOL(_copy_from_user);
19937-EXPORT_SYMBOL(_copy_to_user);
19938
19939 EXPORT_SYMBOL(copy_page);
19940 EXPORT_SYMBOL(clear_page);
19941diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
19942index 7110911..e8cdee5 100644
19943--- a/arch/x86/kernel/xsave.c
19944+++ b/arch/x86/kernel/xsave.c
19945@@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
19946 fx_sw_user->xstate_size > fx_sw_user->extended_size)
19947 return -EINVAL;
19948
19949- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
19950+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
19951 fx_sw_user->extended_size -
19952 FP_XSTATE_MAGIC2_SIZE));
19953 if (err)
19954@@ -266,7 +266,7 @@ fx_only:
19955 * the other extended state.
19956 */
19957 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
19958- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
19959+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
19960 }
19961
19962 /*
19963@@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
19964 if (use_xsave())
19965 err = restore_user_xstate(buf);
19966 else
19967- err = fxrstor_checking((__force struct i387_fxsave_struct *)
19968+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
19969 buf);
19970 if (unlikely(err)) {
19971 /*
19972diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
19973index 89b02bf..0f6511d 100644
19974--- a/arch/x86/kvm/cpuid.c
19975+++ b/arch/x86/kvm/cpuid.c
19976@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
19977 struct kvm_cpuid2 *cpuid,
19978 struct kvm_cpuid_entry2 __user *entries)
19979 {
19980- int r;
19981+ int r, i;
19982
19983 r = -E2BIG;
19984 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
19985 goto out;
19986 r = -EFAULT;
19987- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
19988- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19989+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
19990 goto out;
19991+ for (i = 0; i < cpuid->nent; ++i) {
19992+ struct kvm_cpuid_entry2 cpuid_entry;
19993+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
19994+ goto out;
19995+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
19996+ }
19997 vcpu->arch.cpuid_nent = cpuid->nent;
19998 kvm_apic_set_version(vcpu);
19999 kvm_x86_ops->cpuid_update(vcpu);
20000@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20001 struct kvm_cpuid2 *cpuid,
20002 struct kvm_cpuid_entry2 __user *entries)
20003 {
20004- int r;
20005+ int r, i;
20006
20007 r = -E2BIG;
20008 if (cpuid->nent < vcpu->arch.cpuid_nent)
20009 goto out;
20010 r = -EFAULT;
20011- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20012- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20013+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20014 goto out;
20015+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20016+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20017+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20018+ goto out;
20019+ }
20020 return 0;
20021
20022 out:
20023diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20024index 0982507..7f6d72f 100644
20025--- a/arch/x86/kvm/emulate.c
20026+++ b/arch/x86/kvm/emulate.c
20027@@ -250,6 +250,7 @@ struct gprefix {
20028
20029 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20030 do { \
20031+ unsigned long _tmp; \
20032 __asm__ __volatile__ ( \
20033 _PRE_EFLAGS("0", "4", "2") \
20034 _op _suffix " %"_x"3,%1; " \
20035@@ -264,8 +265,6 @@ struct gprefix {
20036 /* Raw emulation: instruction has two explicit operands. */
20037 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20038 do { \
20039- unsigned long _tmp; \
20040- \
20041 switch ((ctxt)->dst.bytes) { \
20042 case 2: \
20043 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20044@@ -281,7 +280,6 @@ struct gprefix {
20045
20046 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20047 do { \
20048- unsigned long _tmp; \
20049 switch ((ctxt)->dst.bytes) { \
20050 case 1: \
20051 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20052diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20053index cfdc6e0..ab92e84 100644
20054--- a/arch/x86/kvm/lapic.c
20055+++ b/arch/x86/kvm/lapic.c
20056@@ -54,7 +54,7 @@
20057 #define APIC_BUS_CYCLE_NS 1
20058
20059 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20060-#define apic_debug(fmt, arg...)
20061+#define apic_debug(fmt, arg...) do {} while (0)
20062
20063 #define APIC_LVT_NUM 6
20064 /* 14 is the version for Xeon and Pentium 8.4.8*/
20065diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20066index 1561028..0ed7f14 100644
20067--- a/arch/x86/kvm/paging_tmpl.h
20068+++ b/arch/x86/kvm/paging_tmpl.h
20069@@ -197,7 +197,7 @@ retry_walk:
20070 if (unlikely(kvm_is_error_hva(host_addr)))
20071 goto error;
20072
20073- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20074+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20075 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20076 goto error;
20077
20078diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20079index e385214..029e9dd 100644
20080--- a/arch/x86/kvm/svm.c
20081+++ b/arch/x86/kvm/svm.c
20082@@ -3051,6 +3051,7 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
20083 return 0;
20084 }
20085
20086+static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) __size_overflow(3);
20087 static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
20088 {
20089 struct vcpu_svm *svm = to_svm(vcpu);
20090@@ -3420,7 +3421,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20091 int cpu = raw_smp_processor_id();
20092
20093 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20094+
20095+ pax_open_kernel();
20096 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20097+ pax_close_kernel();
20098+
20099 load_TR_desc();
20100 }
20101
20102@@ -3798,6 +3803,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20103 #endif
20104 #endif
20105
20106+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20107+ __set_fs(current_thread_info()->addr_limit);
20108+#endif
20109+
20110 reload_tss(vcpu);
20111
20112 local_irq_disable();
20113diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20114index 3b4c8d8..4ae0af6 100644
20115--- a/arch/x86/kvm/vmx.c
20116+++ b/arch/x86/kvm/vmx.c
20117@@ -1306,7 +1306,11 @@ static void reload_tss(void)
20118 struct desc_struct *descs;
20119
20120 descs = (void *)gdt->address;
20121+
20122+ pax_open_kernel();
20123 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20124+ pax_close_kernel();
20125+
20126 load_TR_desc();
20127 }
20128
20129@@ -2162,6 +2166,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
20130 * Returns 0 on success, non-0 otherwise.
20131 * Assumes vcpu_load() was already called.
20132 */
20133+static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) __size_overflow(3);
20134 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
20135 {
20136 struct vcpu_vmx *vmx = to_vmx(vcpu);
20137@@ -2631,8 +2636,11 @@ static __init int hardware_setup(void)
20138 if (!cpu_has_vmx_flexpriority())
20139 flexpriority_enabled = 0;
20140
20141- if (!cpu_has_vmx_tpr_shadow())
20142- kvm_x86_ops->update_cr8_intercept = NULL;
20143+ if (!cpu_has_vmx_tpr_shadow()) {
20144+ pax_open_kernel();
20145+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20146+ pax_close_kernel();
20147+ }
20148
20149 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20150 kvm_disable_largepages();
20151@@ -3648,7 +3656,7 @@ static void vmx_set_constant_host_state(void)
20152 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20153
20154 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20155- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20156+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20157
20158 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20159 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20160@@ -6184,6 +6192,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20161 "jmp .Lkvm_vmx_return \n\t"
20162 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20163 ".Lkvm_vmx_return: "
20164+
20165+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20166+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20167+ ".Lkvm_vmx_return2: "
20168+#endif
20169+
20170 /* Save guest registers, load host registers, keep flags */
20171 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20172 "pop %0 \n\t"
20173@@ -6232,6 +6246,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20174 #endif
20175 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20176 [wordsize]"i"(sizeof(ulong))
20177+
20178+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20179+ ,[cs]"i"(__KERNEL_CS)
20180+#endif
20181+
20182 : "cc", "memory"
20183 , R"ax", R"bx", R"di", R"si"
20184 #ifdef CONFIG_X86_64
20185@@ -6260,7 +6279,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20186 }
20187 }
20188
20189- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
20190+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
20191+
20192+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20193+ loadsegment(fs, __KERNEL_PERCPU);
20194+#endif
20195+
20196+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20197+ __set_fs(current_thread_info()->addr_limit);
20198+#endif
20199+
20200 vmx->loaded_vmcs->launched = 1;
20201
20202 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
20203diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20204index 9cbfc06..943ffa6 100644
20205--- a/arch/x86/kvm/x86.c
20206+++ b/arch/x86/kvm/x86.c
20207@@ -873,6 +873,7 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
20208 return kvm_set_msr(vcpu, index, *data);
20209 }
20210
20211+static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) __size_overflow(2);
20212 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
20213 {
20214 int version;
20215@@ -1307,12 +1308,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
20216 return 0;
20217 }
20218
20219+static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) __size_overflow(2);
20220 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20221 {
20222 struct kvm *kvm = vcpu->kvm;
20223 int lm = is_long_mode(vcpu);
20224- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20225- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20226+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20227+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20228 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20229 : kvm->arch.xen_hvm_config.blob_size_32;
20230 u32 page_num = data & ~PAGE_MASK;
20231@@ -2145,6 +2147,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20232 if (n < msr_list.nmsrs)
20233 goto out;
20234 r = -EFAULT;
20235+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20236+ goto out;
20237 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20238 num_msrs_to_save * sizeof(u32)))
20239 goto out;
20240@@ -2266,7 +2270,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20241 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20242 struct kvm_interrupt *irq)
20243 {
20244- if (irq->irq < 0 || irq->irq >= 256)
20245+ if (irq->irq >= 256)
20246 return -EINVAL;
20247 if (irqchip_in_kernel(vcpu->kvm))
20248 return -ENXIO;
20249@@ -3497,6 +3501,9 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
20250
20251 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
20252 struct kvm_vcpu *vcpu, u32 access,
20253+ struct x86_exception *exception) __size_overflow(1,3);
20254+static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
20255+ struct kvm_vcpu *vcpu, u32 access,
20256 struct x86_exception *exception)
20257 {
20258 void *data = val;
20259@@ -3528,6 +3535,9 @@ out:
20260 /* used for instruction fetching */
20261 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
20262 gva_t addr, void *val, unsigned int bytes,
20263+ struct x86_exception *exception) __size_overflow(2,4);
20264+static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
20265+ gva_t addr, void *val, unsigned int bytes,
20266 struct x86_exception *exception)
20267 {
20268 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
20269@@ -3552,6 +3562,9 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
20270
20271 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20272 gva_t addr, void *val, unsigned int bytes,
20273+ struct x86_exception *exception) __size_overflow(2,4);
20274+static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20275+ gva_t addr, void *val, unsigned int bytes,
20276 struct x86_exception *exception)
20277 {
20278 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
20279@@ -3665,12 +3678,16 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
20280 }
20281
20282 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20283+ void *val, int bytes) __size_overflow(2);
20284+static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20285 void *val, int bytes)
20286 {
20287 return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
20288 }
20289
20290 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20291+ void *val, int bytes) __size_overflow(2);
20292+static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20293 void *val, int bytes)
20294 {
20295 return emulator_write_phys(vcpu, gpa, val, bytes);
20296@@ -3821,6 +3838,12 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
20297 const void *old,
20298 const void *new,
20299 unsigned int bytes,
20300+ struct x86_exception *exception) __size_overflow(5);
20301+static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
20302+ unsigned long addr,
20303+ const void *old,
20304+ const void *new,
20305+ unsigned int bytes,
20306 struct x86_exception *exception)
20307 {
20308 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
20309@@ -4780,7 +4803,7 @@ static void kvm_set_mmio_spte_mask(void)
20310 kvm_mmu_set_mmio_spte_mask(mask);
20311 }
20312
20313-int kvm_arch_init(void *opaque)
20314+int kvm_arch_init(const void *opaque)
20315 {
20316 int r;
20317 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20318diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
20319index cb80c29..aeee86c 100644
20320--- a/arch/x86/kvm/x86.h
20321+++ b/arch/x86/kvm/x86.h
20322@@ -116,11 +116,11 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
20323
20324 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
20325 gva_t addr, void *val, unsigned int bytes,
20326- struct x86_exception *exception);
20327+ struct x86_exception *exception) __size_overflow(2,4);
20328
20329 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20330 gva_t addr, void *val, unsigned int bytes,
20331- struct x86_exception *exception);
20332+ struct x86_exception *exception) __size_overflow(2,4);
20333
20334 extern u64 host_xcr0;
20335
20336diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20337index 642d880..44e0f3f 100644
20338--- a/arch/x86/lguest/boot.c
20339+++ b/arch/x86/lguest/boot.c
20340@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20341 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20342 * Launcher to reboot us.
20343 */
20344-static void lguest_restart(char *reason)
20345+static __noreturn void lguest_restart(char *reason)
20346 {
20347 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20348+ BUG();
20349 }
20350
20351 /*G:050
20352diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
20353index 042f682..c92afb6 100644
20354--- a/arch/x86/lib/atomic64_32.c
20355+++ b/arch/x86/lib/atomic64_32.c
20356@@ -8,18 +8,30 @@
20357
20358 long long atomic64_read_cx8(long long, const atomic64_t *v);
20359 EXPORT_SYMBOL(atomic64_read_cx8);
20360+long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
20361+EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
20362 long long atomic64_set_cx8(long long, const atomic64_t *v);
20363 EXPORT_SYMBOL(atomic64_set_cx8);
20364+long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
20365+EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
20366 long long atomic64_xchg_cx8(long long, unsigned high);
20367 EXPORT_SYMBOL(atomic64_xchg_cx8);
20368 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
20369 EXPORT_SYMBOL(atomic64_add_return_cx8);
20370+long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20371+EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
20372 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
20373 EXPORT_SYMBOL(atomic64_sub_return_cx8);
20374+long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20375+EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
20376 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
20377 EXPORT_SYMBOL(atomic64_inc_return_cx8);
20378+long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20379+EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
20380 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
20381 EXPORT_SYMBOL(atomic64_dec_return_cx8);
20382+long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20383+EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
20384 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
20385 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
20386 int atomic64_inc_not_zero_cx8(atomic64_t *v);
20387@@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
20388 #ifndef CONFIG_X86_CMPXCHG64
20389 long long atomic64_read_386(long long, const atomic64_t *v);
20390 EXPORT_SYMBOL(atomic64_read_386);
20391+long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
20392+EXPORT_SYMBOL(atomic64_read_unchecked_386);
20393 long long atomic64_set_386(long long, const atomic64_t *v);
20394 EXPORT_SYMBOL(atomic64_set_386);
20395+long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
20396+EXPORT_SYMBOL(atomic64_set_unchecked_386);
20397 long long atomic64_xchg_386(long long, unsigned high);
20398 EXPORT_SYMBOL(atomic64_xchg_386);
20399 long long atomic64_add_return_386(long long a, atomic64_t *v);
20400 EXPORT_SYMBOL(atomic64_add_return_386);
20401+long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20402+EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
20403 long long atomic64_sub_return_386(long long a, atomic64_t *v);
20404 EXPORT_SYMBOL(atomic64_sub_return_386);
20405+long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20406+EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
20407 long long atomic64_inc_return_386(long long a, atomic64_t *v);
20408 EXPORT_SYMBOL(atomic64_inc_return_386);
20409+long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20410+EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
20411 long long atomic64_dec_return_386(long long a, atomic64_t *v);
20412 EXPORT_SYMBOL(atomic64_dec_return_386);
20413+long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20414+EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
20415 long long atomic64_add_386(long long a, atomic64_t *v);
20416 EXPORT_SYMBOL(atomic64_add_386);
20417+long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
20418+EXPORT_SYMBOL(atomic64_add_unchecked_386);
20419 long long atomic64_sub_386(long long a, atomic64_t *v);
20420 EXPORT_SYMBOL(atomic64_sub_386);
20421+long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
20422+EXPORT_SYMBOL(atomic64_sub_unchecked_386);
20423 long long atomic64_inc_386(long long a, atomic64_t *v);
20424 EXPORT_SYMBOL(atomic64_inc_386);
20425+long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
20426+EXPORT_SYMBOL(atomic64_inc_unchecked_386);
20427 long long atomic64_dec_386(long long a, atomic64_t *v);
20428 EXPORT_SYMBOL(atomic64_dec_386);
20429+long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
20430+EXPORT_SYMBOL(atomic64_dec_unchecked_386);
20431 long long atomic64_dec_if_positive_386(atomic64_t *v);
20432 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
20433 int atomic64_inc_not_zero_386(atomic64_t *v);
20434diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20435index e8e7e0d..56fd1b0 100644
20436--- a/arch/x86/lib/atomic64_386_32.S
20437+++ b/arch/x86/lib/atomic64_386_32.S
20438@@ -48,6 +48,10 @@ BEGIN(read)
20439 movl (v), %eax
20440 movl 4(v), %edx
20441 RET_ENDP
20442+BEGIN(read_unchecked)
20443+ movl (v), %eax
20444+ movl 4(v), %edx
20445+RET_ENDP
20446 #undef v
20447
20448 #define v %esi
20449@@ -55,6 +59,10 @@ BEGIN(set)
20450 movl %ebx, (v)
20451 movl %ecx, 4(v)
20452 RET_ENDP
20453+BEGIN(set_unchecked)
20454+ movl %ebx, (v)
20455+ movl %ecx, 4(v)
20456+RET_ENDP
20457 #undef v
20458
20459 #define v %esi
20460@@ -70,6 +78,20 @@ RET_ENDP
20461 BEGIN(add)
20462 addl %eax, (v)
20463 adcl %edx, 4(v)
20464+
20465+#ifdef CONFIG_PAX_REFCOUNT
20466+ jno 0f
20467+ subl %eax, (v)
20468+ sbbl %edx, 4(v)
20469+ int $4
20470+0:
20471+ _ASM_EXTABLE(0b, 0b)
20472+#endif
20473+
20474+RET_ENDP
20475+BEGIN(add_unchecked)
20476+ addl %eax, (v)
20477+ adcl %edx, 4(v)
20478 RET_ENDP
20479 #undef v
20480
20481@@ -77,6 +99,24 @@ RET_ENDP
20482 BEGIN(add_return)
20483 addl (v), %eax
20484 adcl 4(v), %edx
20485+
20486+#ifdef CONFIG_PAX_REFCOUNT
20487+ into
20488+1234:
20489+ _ASM_EXTABLE(1234b, 2f)
20490+#endif
20491+
20492+ movl %eax, (v)
20493+ movl %edx, 4(v)
20494+
20495+#ifdef CONFIG_PAX_REFCOUNT
20496+2:
20497+#endif
20498+
20499+RET_ENDP
20500+BEGIN(add_return_unchecked)
20501+ addl (v), %eax
20502+ adcl 4(v), %edx
20503 movl %eax, (v)
20504 movl %edx, 4(v)
20505 RET_ENDP
20506@@ -86,6 +126,20 @@ RET_ENDP
20507 BEGIN(sub)
20508 subl %eax, (v)
20509 sbbl %edx, 4(v)
20510+
20511+#ifdef CONFIG_PAX_REFCOUNT
20512+ jno 0f
20513+ addl %eax, (v)
20514+ adcl %edx, 4(v)
20515+ int $4
20516+0:
20517+ _ASM_EXTABLE(0b, 0b)
20518+#endif
20519+
20520+RET_ENDP
20521+BEGIN(sub_unchecked)
20522+ subl %eax, (v)
20523+ sbbl %edx, 4(v)
20524 RET_ENDP
20525 #undef v
20526
20527@@ -96,6 +150,27 @@ BEGIN(sub_return)
20528 sbbl $0, %edx
20529 addl (v), %eax
20530 adcl 4(v), %edx
20531+
20532+#ifdef CONFIG_PAX_REFCOUNT
20533+ into
20534+1234:
20535+ _ASM_EXTABLE(1234b, 2f)
20536+#endif
20537+
20538+ movl %eax, (v)
20539+ movl %edx, 4(v)
20540+
20541+#ifdef CONFIG_PAX_REFCOUNT
20542+2:
20543+#endif
20544+
20545+RET_ENDP
20546+BEGIN(sub_return_unchecked)
20547+ negl %edx
20548+ negl %eax
20549+ sbbl $0, %edx
20550+ addl (v), %eax
20551+ adcl 4(v), %edx
20552 movl %eax, (v)
20553 movl %edx, 4(v)
20554 RET_ENDP
20555@@ -105,6 +180,20 @@ RET_ENDP
20556 BEGIN(inc)
20557 addl $1, (v)
20558 adcl $0, 4(v)
20559+
20560+#ifdef CONFIG_PAX_REFCOUNT
20561+ jno 0f
20562+ subl $1, (v)
20563+ sbbl $0, 4(v)
20564+ int $4
20565+0:
20566+ _ASM_EXTABLE(0b, 0b)
20567+#endif
20568+
20569+RET_ENDP
20570+BEGIN(inc_unchecked)
20571+ addl $1, (v)
20572+ adcl $0, 4(v)
20573 RET_ENDP
20574 #undef v
20575
20576@@ -114,6 +203,26 @@ BEGIN(inc_return)
20577 movl 4(v), %edx
20578 addl $1, %eax
20579 adcl $0, %edx
20580+
20581+#ifdef CONFIG_PAX_REFCOUNT
20582+ into
20583+1234:
20584+ _ASM_EXTABLE(1234b, 2f)
20585+#endif
20586+
20587+ movl %eax, (v)
20588+ movl %edx, 4(v)
20589+
20590+#ifdef CONFIG_PAX_REFCOUNT
20591+2:
20592+#endif
20593+
20594+RET_ENDP
20595+BEGIN(inc_return_unchecked)
20596+ movl (v), %eax
20597+ movl 4(v), %edx
20598+ addl $1, %eax
20599+ adcl $0, %edx
20600 movl %eax, (v)
20601 movl %edx, 4(v)
20602 RET_ENDP
20603@@ -123,6 +232,20 @@ RET_ENDP
20604 BEGIN(dec)
20605 subl $1, (v)
20606 sbbl $0, 4(v)
20607+
20608+#ifdef CONFIG_PAX_REFCOUNT
20609+ jno 0f
20610+ addl $1, (v)
20611+ adcl $0, 4(v)
20612+ int $4
20613+0:
20614+ _ASM_EXTABLE(0b, 0b)
20615+#endif
20616+
20617+RET_ENDP
20618+BEGIN(dec_unchecked)
20619+ subl $1, (v)
20620+ sbbl $0, 4(v)
20621 RET_ENDP
20622 #undef v
20623
20624@@ -132,6 +255,26 @@ BEGIN(dec_return)
20625 movl 4(v), %edx
20626 subl $1, %eax
20627 sbbl $0, %edx
20628+
20629+#ifdef CONFIG_PAX_REFCOUNT
20630+ into
20631+1234:
20632+ _ASM_EXTABLE(1234b, 2f)
20633+#endif
20634+
20635+ movl %eax, (v)
20636+ movl %edx, 4(v)
20637+
20638+#ifdef CONFIG_PAX_REFCOUNT
20639+2:
20640+#endif
20641+
20642+RET_ENDP
20643+BEGIN(dec_return_unchecked)
20644+ movl (v), %eax
20645+ movl 4(v), %edx
20646+ subl $1, %eax
20647+ sbbl $0, %edx
20648 movl %eax, (v)
20649 movl %edx, 4(v)
20650 RET_ENDP
20651@@ -143,6 +286,13 @@ BEGIN(add_unless)
20652 adcl %edx, %edi
20653 addl (v), %eax
20654 adcl 4(v), %edx
20655+
20656+#ifdef CONFIG_PAX_REFCOUNT
20657+ into
20658+1234:
20659+ _ASM_EXTABLE(1234b, 2f)
20660+#endif
20661+
20662 cmpl %eax, %esi
20663 je 3f
20664 1:
20665@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20666 1:
20667 addl $1, %eax
20668 adcl $0, %edx
20669+
20670+#ifdef CONFIG_PAX_REFCOUNT
20671+ into
20672+1234:
20673+ _ASM_EXTABLE(1234b, 2f)
20674+#endif
20675+
20676 movl %eax, (v)
20677 movl %edx, 4(v)
20678 movl $1, %eax
20679@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20680 movl 4(v), %edx
20681 subl $1, %eax
20682 sbbl $0, %edx
20683+
20684+#ifdef CONFIG_PAX_REFCOUNT
20685+ into
20686+1234:
20687+ _ASM_EXTABLE(1234b, 1f)
20688+#endif
20689+
20690 js 1f
20691 movl %eax, (v)
20692 movl %edx, 4(v)
20693diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20694index 391a083..d658e9f 100644
20695--- a/arch/x86/lib/atomic64_cx8_32.S
20696+++ b/arch/x86/lib/atomic64_cx8_32.S
20697@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20698 CFI_STARTPROC
20699
20700 read64 %ecx
20701+ pax_force_retaddr
20702 ret
20703 CFI_ENDPROC
20704 ENDPROC(atomic64_read_cx8)
20705
20706+ENTRY(atomic64_read_unchecked_cx8)
20707+ CFI_STARTPROC
20708+
20709+ read64 %ecx
20710+ pax_force_retaddr
20711+ ret
20712+ CFI_ENDPROC
20713+ENDPROC(atomic64_read_unchecked_cx8)
20714+
20715 ENTRY(atomic64_set_cx8)
20716 CFI_STARTPROC
20717
20718@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20719 cmpxchg8b (%esi)
20720 jne 1b
20721
20722+ pax_force_retaddr
20723 ret
20724 CFI_ENDPROC
20725 ENDPROC(atomic64_set_cx8)
20726
20727+ENTRY(atomic64_set_unchecked_cx8)
20728+ CFI_STARTPROC
20729+
20730+1:
20731+/* we don't need LOCK_PREFIX since aligned 64-bit writes
20732+ * are atomic on 586 and newer */
20733+ cmpxchg8b (%esi)
20734+ jne 1b
20735+
20736+ pax_force_retaddr
20737+ ret
20738+ CFI_ENDPROC
20739+ENDPROC(atomic64_set_unchecked_cx8)
20740+
20741 ENTRY(atomic64_xchg_cx8)
20742 CFI_STARTPROC
20743
20744@@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
20745 cmpxchg8b (%esi)
20746 jne 1b
20747
20748+ pax_force_retaddr
20749 ret
20750 CFI_ENDPROC
20751 ENDPROC(atomic64_xchg_cx8)
20752
20753-.macro addsub_return func ins insc
20754-ENTRY(atomic64_\func\()_return_cx8)
20755+.macro addsub_return func ins insc unchecked=""
20756+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20757 CFI_STARTPROC
20758 SAVE ebp
20759 SAVE ebx
20760@@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
20761 movl %edx, %ecx
20762 \ins\()l %esi, %ebx
20763 \insc\()l %edi, %ecx
20764+
20765+.ifb \unchecked
20766+#ifdef CONFIG_PAX_REFCOUNT
20767+ into
20768+2:
20769+ _ASM_EXTABLE(2b, 3f)
20770+#endif
20771+.endif
20772+
20773 LOCK_PREFIX
20774 cmpxchg8b (%ebp)
20775 jne 1b
20776-
20777-10:
20778 movl %ebx, %eax
20779 movl %ecx, %edx
20780+
20781+.ifb \unchecked
20782+#ifdef CONFIG_PAX_REFCOUNT
20783+3:
20784+#endif
20785+.endif
20786+
20787 RESTORE edi
20788 RESTORE esi
20789 RESTORE ebx
20790 RESTORE ebp
20791+ pax_force_retaddr
20792 ret
20793 CFI_ENDPROC
20794-ENDPROC(atomic64_\func\()_return_cx8)
20795+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20796 .endm
20797
20798 addsub_return add add adc
20799 addsub_return sub sub sbb
20800+addsub_return add add adc _unchecked
20801+addsub_return sub sub sbb _unchecked
20802
20803-.macro incdec_return func ins insc
20804-ENTRY(atomic64_\func\()_return_cx8)
20805+.macro incdec_return func ins insc unchecked
20806+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
20807 CFI_STARTPROC
20808 SAVE ebx
20809
20810@@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
20811 movl %edx, %ecx
20812 \ins\()l $1, %ebx
20813 \insc\()l $0, %ecx
20814+
20815+.ifb \unchecked
20816+#ifdef CONFIG_PAX_REFCOUNT
20817+ into
20818+2:
20819+ _ASM_EXTABLE(2b, 3f)
20820+#endif
20821+.endif
20822+
20823 LOCK_PREFIX
20824 cmpxchg8b (%esi)
20825 jne 1b
20826
20827-10:
20828 movl %ebx, %eax
20829 movl %ecx, %edx
20830+
20831+.ifb \unchecked
20832+#ifdef CONFIG_PAX_REFCOUNT
20833+3:
20834+#endif
20835+.endif
20836+
20837 RESTORE ebx
20838+ pax_force_retaddr
20839 ret
20840 CFI_ENDPROC
20841-ENDPROC(atomic64_\func\()_return_cx8)
20842+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
20843 .endm
20844
20845 incdec_return inc add adc
20846 incdec_return dec sub sbb
20847+incdec_return inc add adc _unchecked
20848+incdec_return dec sub sbb _unchecked
20849
20850 ENTRY(atomic64_dec_if_positive_cx8)
20851 CFI_STARTPROC
20852@@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
20853 movl %edx, %ecx
20854 subl $1, %ebx
20855 sbb $0, %ecx
20856+
20857+#ifdef CONFIG_PAX_REFCOUNT
20858+ into
20859+1234:
20860+ _ASM_EXTABLE(1234b, 2f)
20861+#endif
20862+
20863 js 2f
20864 LOCK_PREFIX
20865 cmpxchg8b (%esi)
20866@@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
20867 movl %ebx, %eax
20868 movl %ecx, %edx
20869 RESTORE ebx
20870+ pax_force_retaddr
20871 ret
20872 CFI_ENDPROC
20873 ENDPROC(atomic64_dec_if_positive_cx8)
20874@@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
20875 movl %edx, %ecx
20876 addl %esi, %ebx
20877 adcl %edi, %ecx
20878+
20879+#ifdef CONFIG_PAX_REFCOUNT
20880+ into
20881+1234:
20882+ _ASM_EXTABLE(1234b, 3f)
20883+#endif
20884+
20885 LOCK_PREFIX
20886 cmpxchg8b (%ebp)
20887 jne 1b
20888@@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
20889 CFI_ADJUST_CFA_OFFSET -8
20890 RESTORE ebx
20891 RESTORE ebp
20892+ pax_force_retaddr
20893 ret
20894 4:
20895 cmpl %edx, 4(%esp)
20896@@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
20897 movl %edx, %ecx
20898 addl $1, %ebx
20899 adcl $0, %ecx
20900+
20901+#ifdef CONFIG_PAX_REFCOUNT
20902+ into
20903+1234:
20904+ _ASM_EXTABLE(1234b, 3f)
20905+#endif
20906+
20907 LOCK_PREFIX
20908 cmpxchg8b (%esi)
20909 jne 1b
20910@@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
20911 movl $1, %eax
20912 3:
20913 RESTORE ebx
20914+ pax_force_retaddr
20915 ret
20916 4:
20917 testl %edx, %edx
20918diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
20919index 78d16a5..fbcf666 100644
20920--- a/arch/x86/lib/checksum_32.S
20921+++ b/arch/x86/lib/checksum_32.S
20922@@ -28,7 +28,8 @@
20923 #include <linux/linkage.h>
20924 #include <asm/dwarf2.h>
20925 #include <asm/errno.h>
20926-
20927+#include <asm/segment.h>
20928+
20929 /*
20930 * computes a partial checksum, e.g. for TCP/UDP fragments
20931 */
20932@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
20933
20934 #define ARGBASE 16
20935 #define FP 12
20936-
20937-ENTRY(csum_partial_copy_generic)
20938+
20939+ENTRY(csum_partial_copy_generic_to_user)
20940 CFI_STARTPROC
20941+
20942+#ifdef CONFIG_PAX_MEMORY_UDEREF
20943+ pushl_cfi %gs
20944+ popl_cfi %es
20945+ jmp csum_partial_copy_generic
20946+#endif
20947+
20948+ENTRY(csum_partial_copy_generic_from_user)
20949+
20950+#ifdef CONFIG_PAX_MEMORY_UDEREF
20951+ pushl_cfi %gs
20952+ popl_cfi %ds
20953+#endif
20954+
20955+ENTRY(csum_partial_copy_generic)
20956 subl $4,%esp
20957 CFI_ADJUST_CFA_OFFSET 4
20958 pushl_cfi %edi
20959@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
20960 jmp 4f
20961 SRC(1: movw (%esi), %bx )
20962 addl $2, %esi
20963-DST( movw %bx, (%edi) )
20964+DST( movw %bx, %es:(%edi) )
20965 addl $2, %edi
20966 addw %bx, %ax
20967 adcl $0, %eax
20968@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
20969 SRC(1: movl (%esi), %ebx )
20970 SRC( movl 4(%esi), %edx )
20971 adcl %ebx, %eax
20972-DST( movl %ebx, (%edi) )
20973+DST( movl %ebx, %es:(%edi) )
20974 adcl %edx, %eax
20975-DST( movl %edx, 4(%edi) )
20976+DST( movl %edx, %es:4(%edi) )
20977
20978 SRC( movl 8(%esi), %ebx )
20979 SRC( movl 12(%esi), %edx )
20980 adcl %ebx, %eax
20981-DST( movl %ebx, 8(%edi) )
20982+DST( movl %ebx, %es:8(%edi) )
20983 adcl %edx, %eax
20984-DST( movl %edx, 12(%edi) )
20985+DST( movl %edx, %es:12(%edi) )
20986
20987 SRC( movl 16(%esi), %ebx )
20988 SRC( movl 20(%esi), %edx )
20989 adcl %ebx, %eax
20990-DST( movl %ebx, 16(%edi) )
20991+DST( movl %ebx, %es:16(%edi) )
20992 adcl %edx, %eax
20993-DST( movl %edx, 20(%edi) )
20994+DST( movl %edx, %es:20(%edi) )
20995
20996 SRC( movl 24(%esi), %ebx )
20997 SRC( movl 28(%esi), %edx )
20998 adcl %ebx, %eax
20999-DST( movl %ebx, 24(%edi) )
21000+DST( movl %ebx, %es:24(%edi) )
21001 adcl %edx, %eax
21002-DST( movl %edx, 28(%edi) )
21003+DST( movl %edx, %es:28(%edi) )
21004
21005 lea 32(%esi), %esi
21006 lea 32(%edi), %edi
21007@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
21008 shrl $2, %edx # This clears CF
21009 SRC(3: movl (%esi), %ebx )
21010 adcl %ebx, %eax
21011-DST( movl %ebx, (%edi) )
21012+DST( movl %ebx, %es:(%edi) )
21013 lea 4(%esi), %esi
21014 lea 4(%edi), %edi
21015 dec %edx
21016@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
21017 jb 5f
21018 SRC( movw (%esi), %cx )
21019 leal 2(%esi), %esi
21020-DST( movw %cx, (%edi) )
21021+DST( movw %cx, %es:(%edi) )
21022 leal 2(%edi), %edi
21023 je 6f
21024 shll $16,%ecx
21025 SRC(5: movb (%esi), %cl )
21026-DST( movb %cl, (%edi) )
21027+DST( movb %cl, %es:(%edi) )
21028 6: addl %ecx, %eax
21029 adcl $0, %eax
21030 7:
21031@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
21032
21033 6001:
21034 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21035- movl $-EFAULT, (%ebx)
21036+ movl $-EFAULT, %ss:(%ebx)
21037
21038 # zero the complete destination - computing the rest
21039 # is too much work
21040@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
21041
21042 6002:
21043 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21044- movl $-EFAULT,(%ebx)
21045+ movl $-EFAULT,%ss:(%ebx)
21046 jmp 5000b
21047
21048 .previous
21049
21050+ pushl_cfi %ss
21051+ popl_cfi %ds
21052+ pushl_cfi %ss
21053+ popl_cfi %es
21054 popl_cfi %ebx
21055 CFI_RESTORE ebx
21056 popl_cfi %esi
21057@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21058 popl_cfi %ecx # equivalent to addl $4,%esp
21059 ret
21060 CFI_ENDPROC
21061-ENDPROC(csum_partial_copy_generic)
21062+ENDPROC(csum_partial_copy_generic_to_user)
21063
21064 #else
21065
21066 /* Version for PentiumII/PPro */
21067
21068 #define ROUND1(x) \
21069+ nop; nop; nop; \
21070 SRC(movl x(%esi), %ebx ) ; \
21071 addl %ebx, %eax ; \
21072- DST(movl %ebx, x(%edi) ) ;
21073+ DST(movl %ebx, %es:x(%edi)) ;
21074
21075 #define ROUND(x) \
21076+ nop; nop; nop; \
21077 SRC(movl x(%esi), %ebx ) ; \
21078 adcl %ebx, %eax ; \
21079- DST(movl %ebx, x(%edi) ) ;
21080+ DST(movl %ebx, %es:x(%edi)) ;
21081
21082 #define ARGBASE 12
21083-
21084-ENTRY(csum_partial_copy_generic)
21085+
21086+ENTRY(csum_partial_copy_generic_to_user)
21087 CFI_STARTPROC
21088+
21089+#ifdef CONFIG_PAX_MEMORY_UDEREF
21090+ pushl_cfi %gs
21091+ popl_cfi %es
21092+ jmp csum_partial_copy_generic
21093+#endif
21094+
21095+ENTRY(csum_partial_copy_generic_from_user)
21096+
21097+#ifdef CONFIG_PAX_MEMORY_UDEREF
21098+ pushl_cfi %gs
21099+ popl_cfi %ds
21100+#endif
21101+
21102+ENTRY(csum_partial_copy_generic)
21103 pushl_cfi %ebx
21104 CFI_REL_OFFSET ebx, 0
21105 pushl_cfi %edi
21106@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
21107 subl %ebx, %edi
21108 lea -1(%esi),%edx
21109 andl $-32,%edx
21110- lea 3f(%ebx,%ebx), %ebx
21111+ lea 3f(%ebx,%ebx,2), %ebx
21112 testl %esi, %esi
21113 jmp *%ebx
21114 1: addl $64,%esi
21115@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
21116 jb 5f
21117 SRC( movw (%esi), %dx )
21118 leal 2(%esi), %esi
21119-DST( movw %dx, (%edi) )
21120+DST( movw %dx, %es:(%edi) )
21121 leal 2(%edi), %edi
21122 je 6f
21123 shll $16,%edx
21124 5:
21125 SRC( movb (%esi), %dl )
21126-DST( movb %dl, (%edi) )
21127+DST( movb %dl, %es:(%edi) )
21128 6: addl %edx, %eax
21129 adcl $0, %eax
21130 7:
21131 .section .fixup, "ax"
21132 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21133- movl $-EFAULT, (%ebx)
21134+ movl $-EFAULT, %ss:(%ebx)
21135 # zero the complete destination (computing the rest is too much work)
21136 movl ARGBASE+8(%esp),%edi # dst
21137 movl ARGBASE+12(%esp),%ecx # len
21138@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
21139 rep; stosb
21140 jmp 7b
21141 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21142- movl $-EFAULT, (%ebx)
21143+ movl $-EFAULT, %ss:(%ebx)
21144 jmp 7b
21145 .previous
21146
21147+#ifdef CONFIG_PAX_MEMORY_UDEREF
21148+ pushl_cfi %ss
21149+ popl_cfi %ds
21150+ pushl_cfi %ss
21151+ popl_cfi %es
21152+#endif
21153+
21154 popl_cfi %esi
21155 CFI_RESTORE esi
21156 popl_cfi %edi
21157@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
21158 CFI_RESTORE ebx
21159 ret
21160 CFI_ENDPROC
21161-ENDPROC(csum_partial_copy_generic)
21162+ENDPROC(csum_partial_copy_generic_to_user)
21163
21164 #undef ROUND
21165 #undef ROUND1
21166diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21167index f2145cf..cea889d 100644
21168--- a/arch/x86/lib/clear_page_64.S
21169+++ b/arch/x86/lib/clear_page_64.S
21170@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21171 movl $4096/8,%ecx
21172 xorl %eax,%eax
21173 rep stosq
21174+ pax_force_retaddr
21175 ret
21176 CFI_ENDPROC
21177 ENDPROC(clear_page_c)
21178@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21179 movl $4096,%ecx
21180 xorl %eax,%eax
21181 rep stosb
21182+ pax_force_retaddr
21183 ret
21184 CFI_ENDPROC
21185 ENDPROC(clear_page_c_e)
21186@@ -43,6 +45,7 @@ ENTRY(clear_page)
21187 leaq 64(%rdi),%rdi
21188 jnz .Lloop
21189 nop
21190+ pax_force_retaddr
21191 ret
21192 CFI_ENDPROC
21193 .Lclear_page_end:
21194@@ -58,7 +61,7 @@ ENDPROC(clear_page)
21195
21196 #include <asm/cpufeature.h>
21197
21198- .section .altinstr_replacement,"ax"
21199+ .section .altinstr_replacement,"a"
21200 1: .byte 0xeb /* jmp <disp8> */
21201 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21202 2: .byte 0xeb /* jmp <disp8> */
21203diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21204index 1e572c5..2a162cd 100644
21205--- a/arch/x86/lib/cmpxchg16b_emu.S
21206+++ b/arch/x86/lib/cmpxchg16b_emu.S
21207@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21208
21209 popf
21210 mov $1, %al
21211+ pax_force_retaddr
21212 ret
21213
21214 not_same:
21215 popf
21216 xor %al,%al
21217+ pax_force_retaddr
21218 ret
21219
21220 CFI_ENDPROC
21221diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21222index 01c805b..dccb07f 100644
21223--- a/arch/x86/lib/copy_page_64.S
21224+++ b/arch/x86/lib/copy_page_64.S
21225@@ -9,6 +9,7 @@ copy_page_c:
21226 CFI_STARTPROC
21227 movl $4096/8,%ecx
21228 rep movsq
21229+ pax_force_retaddr
21230 ret
21231 CFI_ENDPROC
21232 ENDPROC(copy_page_c)
21233@@ -39,7 +40,7 @@ ENTRY(copy_page)
21234 movq 16 (%rsi), %rdx
21235 movq 24 (%rsi), %r8
21236 movq 32 (%rsi), %r9
21237- movq 40 (%rsi), %r10
21238+ movq 40 (%rsi), %r13
21239 movq 48 (%rsi), %r11
21240 movq 56 (%rsi), %r12
21241
21242@@ -50,7 +51,7 @@ ENTRY(copy_page)
21243 movq %rdx, 16 (%rdi)
21244 movq %r8, 24 (%rdi)
21245 movq %r9, 32 (%rdi)
21246- movq %r10, 40 (%rdi)
21247+ movq %r13, 40 (%rdi)
21248 movq %r11, 48 (%rdi)
21249 movq %r12, 56 (%rdi)
21250
21251@@ -69,7 +70,7 @@ ENTRY(copy_page)
21252 movq 16 (%rsi), %rdx
21253 movq 24 (%rsi), %r8
21254 movq 32 (%rsi), %r9
21255- movq 40 (%rsi), %r10
21256+ movq 40 (%rsi), %r13
21257 movq 48 (%rsi), %r11
21258 movq 56 (%rsi), %r12
21259
21260@@ -78,7 +79,7 @@ ENTRY(copy_page)
21261 movq %rdx, 16 (%rdi)
21262 movq %r8, 24 (%rdi)
21263 movq %r9, 32 (%rdi)
21264- movq %r10, 40 (%rdi)
21265+ movq %r13, 40 (%rdi)
21266 movq %r11, 48 (%rdi)
21267 movq %r12, 56 (%rdi)
21268
21269@@ -95,6 +96,7 @@ ENTRY(copy_page)
21270 CFI_RESTORE r13
21271 addq $3*8,%rsp
21272 CFI_ADJUST_CFA_OFFSET -3*8
21273+ pax_force_retaddr
21274 ret
21275 .Lcopy_page_end:
21276 CFI_ENDPROC
21277@@ -105,7 +107,7 @@ ENDPROC(copy_page)
21278
21279 #include <asm/cpufeature.h>
21280
21281- .section .altinstr_replacement,"ax"
21282+ .section .altinstr_replacement,"a"
21283 1: .byte 0xeb /* jmp <disp8> */
21284 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21285 2:
21286diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21287index 0248402..821c786 100644
21288--- a/arch/x86/lib/copy_user_64.S
21289+++ b/arch/x86/lib/copy_user_64.S
21290@@ -16,6 +16,7 @@
21291 #include <asm/thread_info.h>
21292 #include <asm/cpufeature.h>
21293 #include <asm/alternative-asm.h>
21294+#include <asm/pgtable.h>
21295
21296 /*
21297 * By placing feature2 after feature1 in altinstructions section, we logically
21298@@ -29,7 +30,7 @@
21299 .byte 0xe9 /* 32bit jump */
21300 .long \orig-1f /* by default jump to orig */
21301 1:
21302- .section .altinstr_replacement,"ax"
21303+ .section .altinstr_replacement,"a"
21304 2: .byte 0xe9 /* near jump with 32bit immediate */
21305 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21306 3: .byte 0xe9 /* near jump with 32bit immediate */
21307@@ -71,47 +72,20 @@
21308 #endif
21309 .endm
21310
21311-/* Standard copy_to_user with segment limit checking */
21312-ENTRY(_copy_to_user)
21313- CFI_STARTPROC
21314- GET_THREAD_INFO(%rax)
21315- movq %rdi,%rcx
21316- addq %rdx,%rcx
21317- jc bad_to_user
21318- cmpq TI_addr_limit(%rax),%rcx
21319- ja bad_to_user
21320- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21321- copy_user_generic_unrolled,copy_user_generic_string, \
21322- copy_user_enhanced_fast_string
21323- CFI_ENDPROC
21324-ENDPROC(_copy_to_user)
21325-
21326-/* Standard copy_from_user with segment limit checking */
21327-ENTRY(_copy_from_user)
21328- CFI_STARTPROC
21329- GET_THREAD_INFO(%rax)
21330- movq %rsi,%rcx
21331- addq %rdx,%rcx
21332- jc bad_from_user
21333- cmpq TI_addr_limit(%rax),%rcx
21334- ja bad_from_user
21335- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21336- copy_user_generic_unrolled,copy_user_generic_string, \
21337- copy_user_enhanced_fast_string
21338- CFI_ENDPROC
21339-ENDPROC(_copy_from_user)
21340-
21341 .section .fixup,"ax"
21342 /* must zero dest */
21343 ENTRY(bad_from_user)
21344 bad_from_user:
21345 CFI_STARTPROC
21346+ testl %edx,%edx
21347+ js bad_to_user
21348 movl %edx,%ecx
21349 xorl %eax,%eax
21350 rep
21351 stosb
21352 bad_to_user:
21353 movl %edx,%eax
21354+ pax_force_retaddr
21355 ret
21356 CFI_ENDPROC
21357 ENDPROC(bad_from_user)
21358@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21359 jz 17f
21360 1: movq (%rsi),%r8
21361 2: movq 1*8(%rsi),%r9
21362-3: movq 2*8(%rsi),%r10
21363+3: movq 2*8(%rsi),%rax
21364 4: movq 3*8(%rsi),%r11
21365 5: movq %r8,(%rdi)
21366 6: movq %r9,1*8(%rdi)
21367-7: movq %r10,2*8(%rdi)
21368+7: movq %rax,2*8(%rdi)
21369 8: movq %r11,3*8(%rdi)
21370 9: movq 4*8(%rsi),%r8
21371 10: movq 5*8(%rsi),%r9
21372-11: movq 6*8(%rsi),%r10
21373+11: movq 6*8(%rsi),%rax
21374 12: movq 7*8(%rsi),%r11
21375 13: movq %r8,4*8(%rdi)
21376 14: movq %r9,5*8(%rdi)
21377-15: movq %r10,6*8(%rdi)
21378+15: movq %rax,6*8(%rdi)
21379 16: movq %r11,7*8(%rdi)
21380 leaq 64(%rsi),%rsi
21381 leaq 64(%rdi),%rdi
21382@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21383 decl %ecx
21384 jnz 21b
21385 23: xor %eax,%eax
21386+ pax_force_retaddr
21387 ret
21388
21389 .section .fixup,"ax"
21390@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
21391 3: rep
21392 movsb
21393 4: xorl %eax,%eax
21394+ pax_force_retaddr
21395 ret
21396
21397 .section .fixup,"ax"
21398@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
21399 1: rep
21400 movsb
21401 2: xorl %eax,%eax
21402+ pax_force_retaddr
21403 ret
21404
21405 .section .fixup,"ax"
21406diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21407index cb0c112..e3a6895 100644
21408--- a/arch/x86/lib/copy_user_nocache_64.S
21409+++ b/arch/x86/lib/copy_user_nocache_64.S
21410@@ -8,12 +8,14 @@
21411
21412 #include <linux/linkage.h>
21413 #include <asm/dwarf2.h>
21414+#include <asm/alternative-asm.h>
21415
21416 #define FIX_ALIGNMENT 1
21417
21418 #include <asm/current.h>
21419 #include <asm/asm-offsets.h>
21420 #include <asm/thread_info.h>
21421+#include <asm/pgtable.h>
21422
21423 .macro ALIGN_DESTINATION
21424 #ifdef FIX_ALIGNMENT
21425@@ -50,6 +52,15 @@
21426 */
21427 ENTRY(__copy_user_nocache)
21428 CFI_STARTPROC
21429+
21430+#ifdef CONFIG_PAX_MEMORY_UDEREF
21431+ mov $PAX_USER_SHADOW_BASE,%rcx
21432+ cmp %rcx,%rsi
21433+ jae 1f
21434+ add %rcx,%rsi
21435+1:
21436+#endif
21437+
21438 cmpl $8,%edx
21439 jb 20f /* less then 8 bytes, go to byte copy loop */
21440 ALIGN_DESTINATION
21441@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21442 jz 17f
21443 1: movq (%rsi),%r8
21444 2: movq 1*8(%rsi),%r9
21445-3: movq 2*8(%rsi),%r10
21446+3: movq 2*8(%rsi),%rax
21447 4: movq 3*8(%rsi),%r11
21448 5: movnti %r8,(%rdi)
21449 6: movnti %r9,1*8(%rdi)
21450-7: movnti %r10,2*8(%rdi)
21451+7: movnti %rax,2*8(%rdi)
21452 8: movnti %r11,3*8(%rdi)
21453 9: movq 4*8(%rsi),%r8
21454 10: movq 5*8(%rsi),%r9
21455-11: movq 6*8(%rsi),%r10
21456+11: movq 6*8(%rsi),%rax
21457 12: movq 7*8(%rsi),%r11
21458 13: movnti %r8,4*8(%rdi)
21459 14: movnti %r9,5*8(%rdi)
21460-15: movnti %r10,6*8(%rdi)
21461+15: movnti %rax,6*8(%rdi)
21462 16: movnti %r11,7*8(%rdi)
21463 leaq 64(%rsi),%rsi
21464 leaq 64(%rdi),%rdi
21465@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21466 jnz 21b
21467 23: xorl %eax,%eax
21468 sfence
21469+ pax_force_retaddr
21470 ret
21471
21472 .section .fixup,"ax"
21473diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21474index fb903b7..c92b7f7 100644
21475--- a/arch/x86/lib/csum-copy_64.S
21476+++ b/arch/x86/lib/csum-copy_64.S
21477@@ -8,6 +8,7 @@
21478 #include <linux/linkage.h>
21479 #include <asm/dwarf2.h>
21480 #include <asm/errno.h>
21481+#include <asm/alternative-asm.h>
21482
21483 /*
21484 * Checksum copy with exception handling.
21485@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21486 CFI_RESTORE rbp
21487 addq $7*8, %rsp
21488 CFI_ADJUST_CFA_OFFSET -7*8
21489+ pax_force_retaddr 0, 1
21490 ret
21491 CFI_RESTORE_STATE
21492
21493diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21494index 459b58a..9570bc7 100644
21495--- a/arch/x86/lib/csum-wrappers_64.c
21496+++ b/arch/x86/lib/csum-wrappers_64.c
21497@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21498 len -= 2;
21499 }
21500 }
21501- isum = csum_partial_copy_generic((__force const void *)src,
21502+
21503+#ifdef CONFIG_PAX_MEMORY_UDEREF
21504+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21505+ src += PAX_USER_SHADOW_BASE;
21506+#endif
21507+
21508+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
21509 dst, len, isum, errp, NULL);
21510 if (unlikely(*errp))
21511 goto out_err;
21512@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21513 }
21514
21515 *errp = 0;
21516- return csum_partial_copy_generic(src, (void __force *)dst,
21517+
21518+#ifdef CONFIG_PAX_MEMORY_UDEREF
21519+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21520+ dst += PAX_USER_SHADOW_BASE;
21521+#endif
21522+
21523+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21524 len, isum, NULL, errp);
21525 }
21526 EXPORT_SYMBOL(csum_partial_copy_to_user);
21527diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21528index 51f1504..ddac4c1 100644
21529--- a/arch/x86/lib/getuser.S
21530+++ b/arch/x86/lib/getuser.S
21531@@ -33,15 +33,38 @@
21532 #include <asm/asm-offsets.h>
21533 #include <asm/thread_info.h>
21534 #include <asm/asm.h>
21535+#include <asm/segment.h>
21536+#include <asm/pgtable.h>
21537+#include <asm/alternative-asm.h>
21538+
21539+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21540+#define __copyuser_seg gs;
21541+#else
21542+#define __copyuser_seg
21543+#endif
21544
21545 .text
21546 ENTRY(__get_user_1)
21547 CFI_STARTPROC
21548+
21549+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21550 GET_THREAD_INFO(%_ASM_DX)
21551 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21552 jae bad_get_user
21553-1: movzb (%_ASM_AX),%edx
21554+
21555+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21556+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21557+ cmp %_ASM_DX,%_ASM_AX
21558+ jae 1234f
21559+ add %_ASM_DX,%_ASM_AX
21560+1234:
21561+#endif
21562+
21563+#endif
21564+
21565+1: __copyuser_seg movzb (%_ASM_AX),%edx
21566 xor %eax,%eax
21567+ pax_force_retaddr
21568 ret
21569 CFI_ENDPROC
21570 ENDPROC(__get_user_1)
21571@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21572 ENTRY(__get_user_2)
21573 CFI_STARTPROC
21574 add $1,%_ASM_AX
21575+
21576+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21577 jc bad_get_user
21578 GET_THREAD_INFO(%_ASM_DX)
21579 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21580 jae bad_get_user
21581-2: movzwl -1(%_ASM_AX),%edx
21582+
21583+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21584+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21585+ cmp %_ASM_DX,%_ASM_AX
21586+ jae 1234f
21587+ add %_ASM_DX,%_ASM_AX
21588+1234:
21589+#endif
21590+
21591+#endif
21592+
21593+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21594 xor %eax,%eax
21595+ pax_force_retaddr
21596 ret
21597 CFI_ENDPROC
21598 ENDPROC(__get_user_2)
21599@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21600 ENTRY(__get_user_4)
21601 CFI_STARTPROC
21602 add $3,%_ASM_AX
21603+
21604+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21605 jc bad_get_user
21606 GET_THREAD_INFO(%_ASM_DX)
21607 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21608 jae bad_get_user
21609-3: mov -3(%_ASM_AX),%edx
21610+
21611+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21612+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21613+ cmp %_ASM_DX,%_ASM_AX
21614+ jae 1234f
21615+ add %_ASM_DX,%_ASM_AX
21616+1234:
21617+#endif
21618+
21619+#endif
21620+
21621+3: __copyuser_seg mov -3(%_ASM_AX),%edx
21622 xor %eax,%eax
21623+ pax_force_retaddr
21624 ret
21625 CFI_ENDPROC
21626 ENDPROC(__get_user_4)
21627@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21628 GET_THREAD_INFO(%_ASM_DX)
21629 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21630 jae bad_get_user
21631+
21632+#ifdef CONFIG_PAX_MEMORY_UDEREF
21633+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21634+ cmp %_ASM_DX,%_ASM_AX
21635+ jae 1234f
21636+ add %_ASM_DX,%_ASM_AX
21637+1234:
21638+#endif
21639+
21640 4: movq -7(%_ASM_AX),%_ASM_DX
21641 xor %eax,%eax
21642+ pax_force_retaddr
21643 ret
21644 CFI_ENDPROC
21645 ENDPROC(__get_user_8)
21646@@ -91,6 +152,7 @@ bad_get_user:
21647 CFI_STARTPROC
21648 xor %edx,%edx
21649 mov $(-EFAULT),%_ASM_AX
21650+ pax_force_retaddr
21651 ret
21652 CFI_ENDPROC
21653 END(bad_get_user)
21654diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21655index 5a1f9f3..ba9f577 100644
21656--- a/arch/x86/lib/insn.c
21657+++ b/arch/x86/lib/insn.c
21658@@ -21,6 +21,11 @@
21659 #include <linux/string.h>
21660 #include <asm/inat.h>
21661 #include <asm/insn.h>
21662+#ifdef __KERNEL__
21663+#include <asm/pgtable_types.h>
21664+#else
21665+#define ktla_ktva(addr) addr
21666+#endif
21667
21668 /* Verify next sizeof(t) bytes can be on the same instruction */
21669 #define validate_next(t, insn, n) \
21670@@ -49,8 +54,8 @@
21671 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21672 {
21673 memset(insn, 0, sizeof(*insn));
21674- insn->kaddr = kaddr;
21675- insn->next_byte = kaddr;
21676+ insn->kaddr = ktla_ktva(kaddr);
21677+ insn->next_byte = ktla_ktva(kaddr);
21678 insn->x86_64 = x86_64 ? 1 : 0;
21679 insn->opnd_bytes = 4;
21680 if (x86_64)
21681diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21682index 05a95e7..326f2fa 100644
21683--- a/arch/x86/lib/iomap_copy_64.S
21684+++ b/arch/x86/lib/iomap_copy_64.S
21685@@ -17,6 +17,7 @@
21686
21687 #include <linux/linkage.h>
21688 #include <asm/dwarf2.h>
21689+#include <asm/alternative-asm.h>
21690
21691 /*
21692 * override generic version in lib/iomap_copy.c
21693@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21694 CFI_STARTPROC
21695 movl %edx,%ecx
21696 rep movsd
21697+ pax_force_retaddr
21698 ret
21699 CFI_ENDPROC
21700 ENDPROC(__iowrite32_copy)
21701diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21702index efbf2a0..8893637 100644
21703--- a/arch/x86/lib/memcpy_64.S
21704+++ b/arch/x86/lib/memcpy_64.S
21705@@ -34,6 +34,7 @@
21706 rep movsq
21707 movl %edx, %ecx
21708 rep movsb
21709+ pax_force_retaddr
21710 ret
21711 .Lmemcpy_e:
21712 .previous
21713@@ -51,6 +52,7 @@
21714
21715 movl %edx, %ecx
21716 rep movsb
21717+ pax_force_retaddr
21718 ret
21719 .Lmemcpy_e_e:
21720 .previous
21721@@ -81,13 +83,13 @@ ENTRY(memcpy)
21722 */
21723 movq 0*8(%rsi), %r8
21724 movq 1*8(%rsi), %r9
21725- movq 2*8(%rsi), %r10
21726+ movq 2*8(%rsi), %rcx
21727 movq 3*8(%rsi), %r11
21728 leaq 4*8(%rsi), %rsi
21729
21730 movq %r8, 0*8(%rdi)
21731 movq %r9, 1*8(%rdi)
21732- movq %r10, 2*8(%rdi)
21733+ movq %rcx, 2*8(%rdi)
21734 movq %r11, 3*8(%rdi)
21735 leaq 4*8(%rdi), %rdi
21736 jae .Lcopy_forward_loop
21737@@ -110,12 +112,12 @@ ENTRY(memcpy)
21738 subq $0x20, %rdx
21739 movq -1*8(%rsi), %r8
21740 movq -2*8(%rsi), %r9
21741- movq -3*8(%rsi), %r10
21742+ movq -3*8(%rsi), %rcx
21743 movq -4*8(%rsi), %r11
21744 leaq -4*8(%rsi), %rsi
21745 movq %r8, -1*8(%rdi)
21746 movq %r9, -2*8(%rdi)
21747- movq %r10, -3*8(%rdi)
21748+ movq %rcx, -3*8(%rdi)
21749 movq %r11, -4*8(%rdi)
21750 leaq -4*8(%rdi), %rdi
21751 jae .Lcopy_backward_loop
21752@@ -135,12 +137,13 @@ ENTRY(memcpy)
21753 */
21754 movq 0*8(%rsi), %r8
21755 movq 1*8(%rsi), %r9
21756- movq -2*8(%rsi, %rdx), %r10
21757+ movq -2*8(%rsi, %rdx), %rcx
21758 movq -1*8(%rsi, %rdx), %r11
21759 movq %r8, 0*8(%rdi)
21760 movq %r9, 1*8(%rdi)
21761- movq %r10, -2*8(%rdi, %rdx)
21762+ movq %rcx, -2*8(%rdi, %rdx)
21763 movq %r11, -1*8(%rdi, %rdx)
21764+ pax_force_retaddr
21765 retq
21766 .p2align 4
21767 .Lless_16bytes:
21768@@ -153,6 +156,7 @@ ENTRY(memcpy)
21769 movq -1*8(%rsi, %rdx), %r9
21770 movq %r8, 0*8(%rdi)
21771 movq %r9, -1*8(%rdi, %rdx)
21772+ pax_force_retaddr
21773 retq
21774 .p2align 4
21775 .Lless_8bytes:
21776@@ -166,6 +170,7 @@ ENTRY(memcpy)
21777 movl -4(%rsi, %rdx), %r8d
21778 movl %ecx, (%rdi)
21779 movl %r8d, -4(%rdi, %rdx)
21780+ pax_force_retaddr
21781 retq
21782 .p2align 4
21783 .Lless_3bytes:
21784@@ -183,6 +188,7 @@ ENTRY(memcpy)
21785 jnz .Lloop_1
21786
21787 .Lend:
21788+ pax_force_retaddr
21789 retq
21790 CFI_ENDPROC
21791 ENDPROC(memcpy)
21792diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
21793index ee16461..c39c199 100644
21794--- a/arch/x86/lib/memmove_64.S
21795+++ b/arch/x86/lib/memmove_64.S
21796@@ -61,13 +61,13 @@ ENTRY(memmove)
21797 5:
21798 sub $0x20, %rdx
21799 movq 0*8(%rsi), %r11
21800- movq 1*8(%rsi), %r10
21801+ movq 1*8(%rsi), %rcx
21802 movq 2*8(%rsi), %r9
21803 movq 3*8(%rsi), %r8
21804 leaq 4*8(%rsi), %rsi
21805
21806 movq %r11, 0*8(%rdi)
21807- movq %r10, 1*8(%rdi)
21808+ movq %rcx, 1*8(%rdi)
21809 movq %r9, 2*8(%rdi)
21810 movq %r8, 3*8(%rdi)
21811 leaq 4*8(%rdi), %rdi
21812@@ -81,10 +81,10 @@ ENTRY(memmove)
21813 4:
21814 movq %rdx, %rcx
21815 movq -8(%rsi, %rdx), %r11
21816- lea -8(%rdi, %rdx), %r10
21817+ lea -8(%rdi, %rdx), %r9
21818 shrq $3, %rcx
21819 rep movsq
21820- movq %r11, (%r10)
21821+ movq %r11, (%r9)
21822 jmp 13f
21823 .Lmemmove_end_forward:
21824
21825@@ -95,14 +95,14 @@ ENTRY(memmove)
21826 7:
21827 movq %rdx, %rcx
21828 movq (%rsi), %r11
21829- movq %rdi, %r10
21830+ movq %rdi, %r9
21831 leaq -8(%rsi, %rdx), %rsi
21832 leaq -8(%rdi, %rdx), %rdi
21833 shrq $3, %rcx
21834 std
21835 rep movsq
21836 cld
21837- movq %r11, (%r10)
21838+ movq %r11, (%r9)
21839 jmp 13f
21840
21841 /*
21842@@ -127,13 +127,13 @@ ENTRY(memmove)
21843 8:
21844 subq $0x20, %rdx
21845 movq -1*8(%rsi), %r11
21846- movq -2*8(%rsi), %r10
21847+ movq -2*8(%rsi), %rcx
21848 movq -3*8(%rsi), %r9
21849 movq -4*8(%rsi), %r8
21850 leaq -4*8(%rsi), %rsi
21851
21852 movq %r11, -1*8(%rdi)
21853- movq %r10, -2*8(%rdi)
21854+ movq %rcx, -2*8(%rdi)
21855 movq %r9, -3*8(%rdi)
21856 movq %r8, -4*8(%rdi)
21857 leaq -4*8(%rdi), %rdi
21858@@ -151,11 +151,11 @@ ENTRY(memmove)
21859 * Move data from 16 bytes to 31 bytes.
21860 */
21861 movq 0*8(%rsi), %r11
21862- movq 1*8(%rsi), %r10
21863+ movq 1*8(%rsi), %rcx
21864 movq -2*8(%rsi, %rdx), %r9
21865 movq -1*8(%rsi, %rdx), %r8
21866 movq %r11, 0*8(%rdi)
21867- movq %r10, 1*8(%rdi)
21868+ movq %rcx, 1*8(%rdi)
21869 movq %r9, -2*8(%rdi, %rdx)
21870 movq %r8, -1*8(%rdi, %rdx)
21871 jmp 13f
21872@@ -167,9 +167,9 @@ ENTRY(memmove)
21873 * Move data from 8 bytes to 15 bytes.
21874 */
21875 movq 0*8(%rsi), %r11
21876- movq -1*8(%rsi, %rdx), %r10
21877+ movq -1*8(%rsi, %rdx), %r9
21878 movq %r11, 0*8(%rdi)
21879- movq %r10, -1*8(%rdi, %rdx)
21880+ movq %r9, -1*8(%rdi, %rdx)
21881 jmp 13f
21882 10:
21883 cmpq $4, %rdx
21884@@ -178,9 +178,9 @@ ENTRY(memmove)
21885 * Move data from 4 bytes to 7 bytes.
21886 */
21887 movl (%rsi), %r11d
21888- movl -4(%rsi, %rdx), %r10d
21889+ movl -4(%rsi, %rdx), %r9d
21890 movl %r11d, (%rdi)
21891- movl %r10d, -4(%rdi, %rdx)
21892+ movl %r9d, -4(%rdi, %rdx)
21893 jmp 13f
21894 11:
21895 cmp $2, %rdx
21896@@ -189,9 +189,9 @@ ENTRY(memmove)
21897 * Move data from 2 bytes to 3 bytes.
21898 */
21899 movw (%rsi), %r11w
21900- movw -2(%rsi, %rdx), %r10w
21901+ movw -2(%rsi, %rdx), %r9w
21902 movw %r11w, (%rdi)
21903- movw %r10w, -2(%rdi, %rdx)
21904+ movw %r9w, -2(%rdi, %rdx)
21905 jmp 13f
21906 12:
21907 cmp $1, %rdx
21908@@ -202,6 +202,7 @@ ENTRY(memmove)
21909 movb (%rsi), %r11b
21910 movb %r11b, (%rdi)
21911 13:
21912+ pax_force_retaddr
21913 retq
21914 CFI_ENDPROC
21915
21916@@ -210,6 +211,7 @@ ENTRY(memmove)
21917 /* Forward moving data. */
21918 movq %rdx, %rcx
21919 rep movsb
21920+ pax_force_retaddr
21921 retq
21922 .Lmemmove_end_forward_efs:
21923 .previous
21924diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
21925index 79bd454..dff325a 100644
21926--- a/arch/x86/lib/memset_64.S
21927+++ b/arch/x86/lib/memset_64.S
21928@@ -31,6 +31,7 @@
21929 movl %r8d,%ecx
21930 rep stosb
21931 movq %r9,%rax
21932+ pax_force_retaddr
21933 ret
21934 .Lmemset_e:
21935 .previous
21936@@ -53,6 +54,7 @@
21937 movl %edx,%ecx
21938 rep stosb
21939 movq %r9,%rax
21940+ pax_force_retaddr
21941 ret
21942 .Lmemset_e_e:
21943 .previous
21944@@ -60,13 +62,13 @@
21945 ENTRY(memset)
21946 ENTRY(__memset)
21947 CFI_STARTPROC
21948- movq %rdi,%r10
21949 movq %rdx,%r11
21950
21951 /* expand byte value */
21952 movzbl %sil,%ecx
21953 movabs $0x0101010101010101,%rax
21954 mul %rcx /* with rax, clobbers rdx */
21955+ movq %rdi,%rdx
21956
21957 /* align dst */
21958 movl %edi,%r9d
21959@@ -120,7 +122,8 @@ ENTRY(__memset)
21960 jnz .Lloop_1
21961
21962 .Lende:
21963- movq %r10,%rax
21964+ movq %rdx,%rax
21965+ pax_force_retaddr
21966 ret
21967
21968 CFI_RESTORE_STATE
21969diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
21970index c9f2d9b..e7fd2c0 100644
21971--- a/arch/x86/lib/mmx_32.c
21972+++ b/arch/x86/lib/mmx_32.c
21973@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
21974 {
21975 void *p;
21976 int i;
21977+ unsigned long cr0;
21978
21979 if (unlikely(in_interrupt()))
21980 return __memcpy(to, from, len);
21981@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
21982 kernel_fpu_begin();
21983
21984 __asm__ __volatile__ (
21985- "1: prefetch (%0)\n" /* This set is 28 bytes */
21986- " prefetch 64(%0)\n"
21987- " prefetch 128(%0)\n"
21988- " prefetch 192(%0)\n"
21989- " prefetch 256(%0)\n"
21990+ "1: prefetch (%1)\n" /* This set is 28 bytes */
21991+ " prefetch 64(%1)\n"
21992+ " prefetch 128(%1)\n"
21993+ " prefetch 192(%1)\n"
21994+ " prefetch 256(%1)\n"
21995 "2: \n"
21996 ".section .fixup, \"ax\"\n"
21997- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
21998+ "3: \n"
21999+
22000+#ifdef CONFIG_PAX_KERNEXEC
22001+ " movl %%cr0, %0\n"
22002+ " movl %0, %%eax\n"
22003+ " andl $0xFFFEFFFF, %%eax\n"
22004+ " movl %%eax, %%cr0\n"
22005+#endif
22006+
22007+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22008+
22009+#ifdef CONFIG_PAX_KERNEXEC
22010+ " movl %0, %%cr0\n"
22011+#endif
22012+
22013 " jmp 2b\n"
22014 ".previous\n"
22015 _ASM_EXTABLE(1b, 3b)
22016- : : "r" (from));
22017+ : "=&r" (cr0) : "r" (from) : "ax");
22018
22019 for ( ; i > 5; i--) {
22020 __asm__ __volatile__ (
22021- "1: prefetch 320(%0)\n"
22022- "2: movq (%0), %%mm0\n"
22023- " movq 8(%0), %%mm1\n"
22024- " movq 16(%0), %%mm2\n"
22025- " movq 24(%0), %%mm3\n"
22026- " movq %%mm0, (%1)\n"
22027- " movq %%mm1, 8(%1)\n"
22028- " movq %%mm2, 16(%1)\n"
22029- " movq %%mm3, 24(%1)\n"
22030- " movq 32(%0), %%mm0\n"
22031- " movq 40(%0), %%mm1\n"
22032- " movq 48(%0), %%mm2\n"
22033- " movq 56(%0), %%mm3\n"
22034- " movq %%mm0, 32(%1)\n"
22035- " movq %%mm1, 40(%1)\n"
22036- " movq %%mm2, 48(%1)\n"
22037- " movq %%mm3, 56(%1)\n"
22038+ "1: prefetch 320(%1)\n"
22039+ "2: movq (%1), %%mm0\n"
22040+ " movq 8(%1), %%mm1\n"
22041+ " movq 16(%1), %%mm2\n"
22042+ " movq 24(%1), %%mm3\n"
22043+ " movq %%mm0, (%2)\n"
22044+ " movq %%mm1, 8(%2)\n"
22045+ " movq %%mm2, 16(%2)\n"
22046+ " movq %%mm3, 24(%2)\n"
22047+ " movq 32(%1), %%mm0\n"
22048+ " movq 40(%1), %%mm1\n"
22049+ " movq 48(%1), %%mm2\n"
22050+ " movq 56(%1), %%mm3\n"
22051+ " movq %%mm0, 32(%2)\n"
22052+ " movq %%mm1, 40(%2)\n"
22053+ " movq %%mm2, 48(%2)\n"
22054+ " movq %%mm3, 56(%2)\n"
22055 ".section .fixup, \"ax\"\n"
22056- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22057+ "3:\n"
22058+
22059+#ifdef CONFIG_PAX_KERNEXEC
22060+ " movl %%cr0, %0\n"
22061+ " movl %0, %%eax\n"
22062+ " andl $0xFFFEFFFF, %%eax\n"
22063+ " movl %%eax, %%cr0\n"
22064+#endif
22065+
22066+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22067+
22068+#ifdef CONFIG_PAX_KERNEXEC
22069+ " movl %0, %%cr0\n"
22070+#endif
22071+
22072 " jmp 2b\n"
22073 ".previous\n"
22074 _ASM_EXTABLE(1b, 3b)
22075- : : "r" (from), "r" (to) : "memory");
22076+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22077
22078 from += 64;
22079 to += 64;
22080@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22081 static void fast_copy_page(void *to, void *from)
22082 {
22083 int i;
22084+ unsigned long cr0;
22085
22086 kernel_fpu_begin();
22087
22088@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22089 * but that is for later. -AV
22090 */
22091 __asm__ __volatile__(
22092- "1: prefetch (%0)\n"
22093- " prefetch 64(%0)\n"
22094- " prefetch 128(%0)\n"
22095- " prefetch 192(%0)\n"
22096- " prefetch 256(%0)\n"
22097+ "1: prefetch (%1)\n"
22098+ " prefetch 64(%1)\n"
22099+ " prefetch 128(%1)\n"
22100+ " prefetch 192(%1)\n"
22101+ " prefetch 256(%1)\n"
22102 "2: \n"
22103 ".section .fixup, \"ax\"\n"
22104- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22105+ "3: \n"
22106+
22107+#ifdef CONFIG_PAX_KERNEXEC
22108+ " movl %%cr0, %0\n"
22109+ " movl %0, %%eax\n"
22110+ " andl $0xFFFEFFFF, %%eax\n"
22111+ " movl %%eax, %%cr0\n"
22112+#endif
22113+
22114+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22115+
22116+#ifdef CONFIG_PAX_KERNEXEC
22117+ " movl %0, %%cr0\n"
22118+#endif
22119+
22120 " jmp 2b\n"
22121 ".previous\n"
22122- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22123+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22124
22125 for (i = 0; i < (4096-320)/64; i++) {
22126 __asm__ __volatile__ (
22127- "1: prefetch 320(%0)\n"
22128- "2: movq (%0), %%mm0\n"
22129- " movntq %%mm0, (%1)\n"
22130- " movq 8(%0), %%mm1\n"
22131- " movntq %%mm1, 8(%1)\n"
22132- " movq 16(%0), %%mm2\n"
22133- " movntq %%mm2, 16(%1)\n"
22134- " movq 24(%0), %%mm3\n"
22135- " movntq %%mm3, 24(%1)\n"
22136- " movq 32(%0), %%mm4\n"
22137- " movntq %%mm4, 32(%1)\n"
22138- " movq 40(%0), %%mm5\n"
22139- " movntq %%mm5, 40(%1)\n"
22140- " movq 48(%0), %%mm6\n"
22141- " movntq %%mm6, 48(%1)\n"
22142- " movq 56(%0), %%mm7\n"
22143- " movntq %%mm7, 56(%1)\n"
22144+ "1: prefetch 320(%1)\n"
22145+ "2: movq (%1), %%mm0\n"
22146+ " movntq %%mm0, (%2)\n"
22147+ " movq 8(%1), %%mm1\n"
22148+ " movntq %%mm1, 8(%2)\n"
22149+ " movq 16(%1), %%mm2\n"
22150+ " movntq %%mm2, 16(%2)\n"
22151+ " movq 24(%1), %%mm3\n"
22152+ " movntq %%mm3, 24(%2)\n"
22153+ " movq 32(%1), %%mm4\n"
22154+ " movntq %%mm4, 32(%2)\n"
22155+ " movq 40(%1), %%mm5\n"
22156+ " movntq %%mm5, 40(%2)\n"
22157+ " movq 48(%1), %%mm6\n"
22158+ " movntq %%mm6, 48(%2)\n"
22159+ " movq 56(%1), %%mm7\n"
22160+ " movntq %%mm7, 56(%2)\n"
22161 ".section .fixup, \"ax\"\n"
22162- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22163+ "3:\n"
22164+
22165+#ifdef CONFIG_PAX_KERNEXEC
22166+ " movl %%cr0, %0\n"
22167+ " movl %0, %%eax\n"
22168+ " andl $0xFFFEFFFF, %%eax\n"
22169+ " movl %%eax, %%cr0\n"
22170+#endif
22171+
22172+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22173+
22174+#ifdef CONFIG_PAX_KERNEXEC
22175+ " movl %0, %%cr0\n"
22176+#endif
22177+
22178 " jmp 2b\n"
22179 ".previous\n"
22180- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22181+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22182
22183 from += 64;
22184 to += 64;
22185@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22186 static void fast_copy_page(void *to, void *from)
22187 {
22188 int i;
22189+ unsigned long cr0;
22190
22191 kernel_fpu_begin();
22192
22193 __asm__ __volatile__ (
22194- "1: prefetch (%0)\n"
22195- " prefetch 64(%0)\n"
22196- " prefetch 128(%0)\n"
22197- " prefetch 192(%0)\n"
22198- " prefetch 256(%0)\n"
22199+ "1: prefetch (%1)\n"
22200+ " prefetch 64(%1)\n"
22201+ " prefetch 128(%1)\n"
22202+ " prefetch 192(%1)\n"
22203+ " prefetch 256(%1)\n"
22204 "2: \n"
22205 ".section .fixup, \"ax\"\n"
22206- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22207+ "3: \n"
22208+
22209+#ifdef CONFIG_PAX_KERNEXEC
22210+ " movl %%cr0, %0\n"
22211+ " movl %0, %%eax\n"
22212+ " andl $0xFFFEFFFF, %%eax\n"
22213+ " movl %%eax, %%cr0\n"
22214+#endif
22215+
22216+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22217+
22218+#ifdef CONFIG_PAX_KERNEXEC
22219+ " movl %0, %%cr0\n"
22220+#endif
22221+
22222 " jmp 2b\n"
22223 ".previous\n"
22224- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22225+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22226
22227 for (i = 0; i < 4096/64; i++) {
22228 __asm__ __volatile__ (
22229- "1: prefetch 320(%0)\n"
22230- "2: movq (%0), %%mm0\n"
22231- " movq 8(%0), %%mm1\n"
22232- " movq 16(%0), %%mm2\n"
22233- " movq 24(%0), %%mm3\n"
22234- " movq %%mm0, (%1)\n"
22235- " movq %%mm1, 8(%1)\n"
22236- " movq %%mm2, 16(%1)\n"
22237- " movq %%mm3, 24(%1)\n"
22238- " movq 32(%0), %%mm0\n"
22239- " movq 40(%0), %%mm1\n"
22240- " movq 48(%0), %%mm2\n"
22241- " movq 56(%0), %%mm3\n"
22242- " movq %%mm0, 32(%1)\n"
22243- " movq %%mm1, 40(%1)\n"
22244- " movq %%mm2, 48(%1)\n"
22245- " movq %%mm3, 56(%1)\n"
22246+ "1: prefetch 320(%1)\n"
22247+ "2: movq (%1), %%mm0\n"
22248+ " movq 8(%1), %%mm1\n"
22249+ " movq 16(%1), %%mm2\n"
22250+ " movq 24(%1), %%mm3\n"
22251+ " movq %%mm0, (%2)\n"
22252+ " movq %%mm1, 8(%2)\n"
22253+ " movq %%mm2, 16(%2)\n"
22254+ " movq %%mm3, 24(%2)\n"
22255+ " movq 32(%1), %%mm0\n"
22256+ " movq 40(%1), %%mm1\n"
22257+ " movq 48(%1), %%mm2\n"
22258+ " movq 56(%1), %%mm3\n"
22259+ " movq %%mm0, 32(%2)\n"
22260+ " movq %%mm1, 40(%2)\n"
22261+ " movq %%mm2, 48(%2)\n"
22262+ " movq %%mm3, 56(%2)\n"
22263 ".section .fixup, \"ax\"\n"
22264- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22265+ "3:\n"
22266+
22267+#ifdef CONFIG_PAX_KERNEXEC
22268+ " movl %%cr0, %0\n"
22269+ " movl %0, %%eax\n"
22270+ " andl $0xFFFEFFFF, %%eax\n"
22271+ " movl %%eax, %%cr0\n"
22272+#endif
22273+
22274+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22275+
22276+#ifdef CONFIG_PAX_KERNEXEC
22277+ " movl %0, %%cr0\n"
22278+#endif
22279+
22280 " jmp 2b\n"
22281 ".previous\n"
22282 _ASM_EXTABLE(1b, 3b)
22283- : : "r" (from), "r" (to) : "memory");
22284+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22285
22286 from += 64;
22287 to += 64;
22288diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22289index 69fa106..adda88b 100644
22290--- a/arch/x86/lib/msr-reg.S
22291+++ b/arch/x86/lib/msr-reg.S
22292@@ -3,6 +3,7 @@
22293 #include <asm/dwarf2.h>
22294 #include <asm/asm.h>
22295 #include <asm/msr.h>
22296+#include <asm/alternative-asm.h>
22297
22298 #ifdef CONFIG_X86_64
22299 /*
22300@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22301 CFI_STARTPROC
22302 pushq_cfi %rbx
22303 pushq_cfi %rbp
22304- movq %rdi, %r10 /* Save pointer */
22305+ movq %rdi, %r9 /* Save pointer */
22306 xorl %r11d, %r11d /* Return value */
22307 movl (%rdi), %eax
22308 movl 4(%rdi), %ecx
22309@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22310 movl 28(%rdi), %edi
22311 CFI_REMEMBER_STATE
22312 1: \op
22313-2: movl %eax, (%r10)
22314+2: movl %eax, (%r9)
22315 movl %r11d, %eax /* Return value */
22316- movl %ecx, 4(%r10)
22317- movl %edx, 8(%r10)
22318- movl %ebx, 12(%r10)
22319- movl %ebp, 20(%r10)
22320- movl %esi, 24(%r10)
22321- movl %edi, 28(%r10)
22322+ movl %ecx, 4(%r9)
22323+ movl %edx, 8(%r9)
22324+ movl %ebx, 12(%r9)
22325+ movl %ebp, 20(%r9)
22326+ movl %esi, 24(%r9)
22327+ movl %edi, 28(%r9)
22328 popq_cfi %rbp
22329 popq_cfi %rbx
22330+ pax_force_retaddr
22331 ret
22332 3:
22333 CFI_RESTORE_STATE
22334diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22335index 36b0d15..d381858 100644
22336--- a/arch/x86/lib/putuser.S
22337+++ b/arch/x86/lib/putuser.S
22338@@ -15,7 +15,9 @@
22339 #include <asm/thread_info.h>
22340 #include <asm/errno.h>
22341 #include <asm/asm.h>
22342-
22343+#include <asm/segment.h>
22344+#include <asm/pgtable.h>
22345+#include <asm/alternative-asm.h>
22346
22347 /*
22348 * __put_user_X
22349@@ -29,52 +31,119 @@
22350 * as they get called from within inline assembly.
22351 */
22352
22353-#define ENTER CFI_STARTPROC ; \
22354- GET_THREAD_INFO(%_ASM_BX)
22355-#define EXIT ret ; \
22356+#define ENTER CFI_STARTPROC
22357+#define EXIT pax_force_retaddr; ret ; \
22358 CFI_ENDPROC
22359
22360+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22361+#define _DEST %_ASM_CX,%_ASM_BX
22362+#else
22363+#define _DEST %_ASM_CX
22364+#endif
22365+
22366+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22367+#define __copyuser_seg gs;
22368+#else
22369+#define __copyuser_seg
22370+#endif
22371+
22372 .text
22373 ENTRY(__put_user_1)
22374 ENTER
22375+
22376+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22377+ GET_THREAD_INFO(%_ASM_BX)
22378 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22379 jae bad_put_user
22380-1: movb %al,(%_ASM_CX)
22381+
22382+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22383+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22384+ cmp %_ASM_BX,%_ASM_CX
22385+ jb 1234f
22386+ xor %ebx,%ebx
22387+1234:
22388+#endif
22389+
22390+#endif
22391+
22392+1: __copyuser_seg movb %al,(_DEST)
22393 xor %eax,%eax
22394 EXIT
22395 ENDPROC(__put_user_1)
22396
22397 ENTRY(__put_user_2)
22398 ENTER
22399+
22400+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22401+ GET_THREAD_INFO(%_ASM_BX)
22402 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22403 sub $1,%_ASM_BX
22404 cmp %_ASM_BX,%_ASM_CX
22405 jae bad_put_user
22406-2: movw %ax,(%_ASM_CX)
22407+
22408+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22409+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22410+ cmp %_ASM_BX,%_ASM_CX
22411+ jb 1234f
22412+ xor %ebx,%ebx
22413+1234:
22414+#endif
22415+
22416+#endif
22417+
22418+2: __copyuser_seg movw %ax,(_DEST)
22419 xor %eax,%eax
22420 EXIT
22421 ENDPROC(__put_user_2)
22422
22423 ENTRY(__put_user_4)
22424 ENTER
22425+
22426+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22427+ GET_THREAD_INFO(%_ASM_BX)
22428 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22429 sub $3,%_ASM_BX
22430 cmp %_ASM_BX,%_ASM_CX
22431 jae bad_put_user
22432-3: movl %eax,(%_ASM_CX)
22433+
22434+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22435+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22436+ cmp %_ASM_BX,%_ASM_CX
22437+ jb 1234f
22438+ xor %ebx,%ebx
22439+1234:
22440+#endif
22441+
22442+#endif
22443+
22444+3: __copyuser_seg movl %eax,(_DEST)
22445 xor %eax,%eax
22446 EXIT
22447 ENDPROC(__put_user_4)
22448
22449 ENTRY(__put_user_8)
22450 ENTER
22451+
22452+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22453+ GET_THREAD_INFO(%_ASM_BX)
22454 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22455 sub $7,%_ASM_BX
22456 cmp %_ASM_BX,%_ASM_CX
22457 jae bad_put_user
22458-4: mov %_ASM_AX,(%_ASM_CX)
22459+
22460+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22461+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22462+ cmp %_ASM_BX,%_ASM_CX
22463+ jb 1234f
22464+ xor %ebx,%ebx
22465+1234:
22466+#endif
22467+
22468+#endif
22469+
22470+4: __copyuser_seg mov %_ASM_AX,(_DEST)
22471 #ifdef CONFIG_X86_32
22472-5: movl %edx,4(%_ASM_CX)
22473+5: __copyuser_seg movl %edx,4(_DEST)
22474 #endif
22475 xor %eax,%eax
22476 EXIT
22477diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22478index 1cad221..de671ee 100644
22479--- a/arch/x86/lib/rwlock.S
22480+++ b/arch/x86/lib/rwlock.S
22481@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22482 FRAME
22483 0: LOCK_PREFIX
22484 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22485+
22486+#ifdef CONFIG_PAX_REFCOUNT
22487+ jno 1234f
22488+ LOCK_PREFIX
22489+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22490+ int $4
22491+1234:
22492+ _ASM_EXTABLE(1234b, 1234b)
22493+#endif
22494+
22495 1: rep; nop
22496 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22497 jne 1b
22498 LOCK_PREFIX
22499 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22500+
22501+#ifdef CONFIG_PAX_REFCOUNT
22502+ jno 1234f
22503+ LOCK_PREFIX
22504+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22505+ int $4
22506+1234:
22507+ _ASM_EXTABLE(1234b, 1234b)
22508+#endif
22509+
22510 jnz 0b
22511 ENDFRAME
22512+ pax_force_retaddr
22513 ret
22514 CFI_ENDPROC
22515 END(__write_lock_failed)
22516@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22517 FRAME
22518 0: LOCK_PREFIX
22519 READ_LOCK_SIZE(inc) (%__lock_ptr)
22520+
22521+#ifdef CONFIG_PAX_REFCOUNT
22522+ jno 1234f
22523+ LOCK_PREFIX
22524+ READ_LOCK_SIZE(dec) (%__lock_ptr)
22525+ int $4
22526+1234:
22527+ _ASM_EXTABLE(1234b, 1234b)
22528+#endif
22529+
22530 1: rep; nop
22531 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22532 js 1b
22533 LOCK_PREFIX
22534 READ_LOCK_SIZE(dec) (%__lock_ptr)
22535+
22536+#ifdef CONFIG_PAX_REFCOUNT
22537+ jno 1234f
22538+ LOCK_PREFIX
22539+ READ_LOCK_SIZE(inc) (%__lock_ptr)
22540+ int $4
22541+1234:
22542+ _ASM_EXTABLE(1234b, 1234b)
22543+#endif
22544+
22545 js 0b
22546 ENDFRAME
22547+ pax_force_retaddr
22548 ret
22549 CFI_ENDPROC
22550 END(__read_lock_failed)
22551diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22552index 5dff5f0..cadebf4 100644
22553--- a/arch/x86/lib/rwsem.S
22554+++ b/arch/x86/lib/rwsem.S
22555@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22556 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22557 CFI_RESTORE __ASM_REG(dx)
22558 restore_common_regs
22559+ pax_force_retaddr
22560 ret
22561 CFI_ENDPROC
22562 ENDPROC(call_rwsem_down_read_failed)
22563@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22564 movq %rax,%rdi
22565 call rwsem_down_write_failed
22566 restore_common_regs
22567+ pax_force_retaddr
22568 ret
22569 CFI_ENDPROC
22570 ENDPROC(call_rwsem_down_write_failed)
22571@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22572 movq %rax,%rdi
22573 call rwsem_wake
22574 restore_common_regs
22575-1: ret
22576+1: pax_force_retaddr
22577+ ret
22578 CFI_ENDPROC
22579 ENDPROC(call_rwsem_wake)
22580
22581@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22582 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22583 CFI_RESTORE __ASM_REG(dx)
22584 restore_common_regs
22585+ pax_force_retaddr
22586 ret
22587 CFI_ENDPROC
22588 ENDPROC(call_rwsem_downgrade_wake)
22589diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22590index a63efd6..ccecad8 100644
22591--- a/arch/x86/lib/thunk_64.S
22592+++ b/arch/x86/lib/thunk_64.S
22593@@ -8,6 +8,7 @@
22594 #include <linux/linkage.h>
22595 #include <asm/dwarf2.h>
22596 #include <asm/calling.h>
22597+#include <asm/alternative-asm.h>
22598
22599 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22600 .macro THUNK name, func, put_ret_addr_in_rdi=0
22601@@ -41,5 +42,6 @@
22602 SAVE_ARGS
22603 restore:
22604 RESTORE_ARGS
22605+ pax_force_retaddr
22606 ret
22607 CFI_ENDPROC
22608diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22609index e218d5d..a99a1eb 100644
22610--- a/arch/x86/lib/usercopy_32.c
22611+++ b/arch/x86/lib/usercopy_32.c
22612@@ -43,7 +43,7 @@ do { \
22613 __asm__ __volatile__( \
22614 " testl %1,%1\n" \
22615 " jz 2f\n" \
22616- "0: lodsb\n" \
22617+ "0: "__copyuser_seg"lodsb\n" \
22618 " stosb\n" \
22619 " testb %%al,%%al\n" \
22620 " jz 1f\n" \
22621@@ -128,10 +128,12 @@ do { \
22622 int __d0; \
22623 might_fault(); \
22624 __asm__ __volatile__( \
22625+ __COPYUSER_SET_ES \
22626 "0: rep; stosl\n" \
22627 " movl %2,%0\n" \
22628 "1: rep; stosb\n" \
22629 "2:\n" \
22630+ __COPYUSER_RESTORE_ES \
22631 ".section .fixup,\"ax\"\n" \
22632 "3: lea 0(%2,%0,4),%0\n" \
22633 " jmp 2b\n" \
22634@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
22635 might_fault();
22636
22637 __asm__ __volatile__(
22638+ __COPYUSER_SET_ES
22639 " testl %0, %0\n"
22640 " jz 3f\n"
22641 " andl %0,%%ecx\n"
22642@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
22643 " subl %%ecx,%0\n"
22644 " addl %0,%%eax\n"
22645 "1:\n"
22646+ __COPYUSER_RESTORE_ES
22647 ".section .fixup,\"ax\"\n"
22648 "2: xorl %%eax,%%eax\n"
22649 " jmp 1b\n"
22650@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
22651
22652 #ifdef CONFIG_X86_INTEL_USERCOPY
22653 static unsigned long
22654-__copy_user_intel(void __user *to, const void *from, unsigned long size)
22655+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22656 {
22657 int d0, d1;
22658 __asm__ __volatile__(
22659@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22660 " .align 2,0x90\n"
22661 "3: movl 0(%4), %%eax\n"
22662 "4: movl 4(%4), %%edx\n"
22663- "5: movl %%eax, 0(%3)\n"
22664- "6: movl %%edx, 4(%3)\n"
22665+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22666+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22667 "7: movl 8(%4), %%eax\n"
22668 "8: movl 12(%4),%%edx\n"
22669- "9: movl %%eax, 8(%3)\n"
22670- "10: movl %%edx, 12(%3)\n"
22671+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22672+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22673 "11: movl 16(%4), %%eax\n"
22674 "12: movl 20(%4), %%edx\n"
22675- "13: movl %%eax, 16(%3)\n"
22676- "14: movl %%edx, 20(%3)\n"
22677+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22678+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22679 "15: movl 24(%4), %%eax\n"
22680 "16: movl 28(%4), %%edx\n"
22681- "17: movl %%eax, 24(%3)\n"
22682- "18: movl %%edx, 28(%3)\n"
22683+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22684+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22685 "19: movl 32(%4), %%eax\n"
22686 "20: movl 36(%4), %%edx\n"
22687- "21: movl %%eax, 32(%3)\n"
22688- "22: movl %%edx, 36(%3)\n"
22689+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22690+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22691 "23: movl 40(%4), %%eax\n"
22692 "24: movl 44(%4), %%edx\n"
22693- "25: movl %%eax, 40(%3)\n"
22694- "26: movl %%edx, 44(%3)\n"
22695+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22696+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22697 "27: movl 48(%4), %%eax\n"
22698 "28: movl 52(%4), %%edx\n"
22699- "29: movl %%eax, 48(%3)\n"
22700- "30: movl %%edx, 52(%3)\n"
22701+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22702+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22703 "31: movl 56(%4), %%eax\n"
22704 "32: movl 60(%4), %%edx\n"
22705- "33: movl %%eax, 56(%3)\n"
22706- "34: movl %%edx, 60(%3)\n"
22707+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22708+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22709 " addl $-64, %0\n"
22710 " addl $64, %4\n"
22711 " addl $64, %3\n"
22712@@ -278,10 +282,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22713 " shrl $2, %0\n"
22714 " andl $3, %%eax\n"
22715 " cld\n"
22716+ __COPYUSER_SET_ES
22717 "99: rep; movsl\n"
22718 "36: movl %%eax, %0\n"
22719 "37: rep; movsb\n"
22720 "100:\n"
22721+ __COPYUSER_RESTORE_ES
22722 ".section .fixup,\"ax\"\n"
22723 "101: lea 0(%%eax,%0,4),%0\n"
22724 " jmp 100b\n"
22725@@ -334,46 +340,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22726 }
22727
22728 static unsigned long
22729+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
22730+{
22731+ int d0, d1;
22732+ __asm__ __volatile__(
22733+ " .align 2,0x90\n"
22734+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
22735+ " cmpl $67, %0\n"
22736+ " jbe 3f\n"
22737+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
22738+ " .align 2,0x90\n"
22739+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
22740+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
22741+ "5: movl %%eax, 0(%3)\n"
22742+ "6: movl %%edx, 4(%3)\n"
22743+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
22744+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
22745+ "9: movl %%eax, 8(%3)\n"
22746+ "10: movl %%edx, 12(%3)\n"
22747+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
22748+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
22749+ "13: movl %%eax, 16(%3)\n"
22750+ "14: movl %%edx, 20(%3)\n"
22751+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
22752+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
22753+ "17: movl %%eax, 24(%3)\n"
22754+ "18: movl %%edx, 28(%3)\n"
22755+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
22756+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
22757+ "21: movl %%eax, 32(%3)\n"
22758+ "22: movl %%edx, 36(%3)\n"
22759+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
22760+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
22761+ "25: movl %%eax, 40(%3)\n"
22762+ "26: movl %%edx, 44(%3)\n"
22763+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
22764+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
22765+ "29: movl %%eax, 48(%3)\n"
22766+ "30: movl %%edx, 52(%3)\n"
22767+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
22768+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
22769+ "33: movl %%eax, 56(%3)\n"
22770+ "34: movl %%edx, 60(%3)\n"
22771+ " addl $-64, %0\n"
22772+ " addl $64, %4\n"
22773+ " addl $64, %3\n"
22774+ " cmpl $63, %0\n"
22775+ " ja 1b\n"
22776+ "35: movl %0, %%eax\n"
22777+ " shrl $2, %0\n"
22778+ " andl $3, %%eax\n"
22779+ " cld\n"
22780+ "99: rep; "__copyuser_seg" movsl\n"
22781+ "36: movl %%eax, %0\n"
22782+ "37: rep; "__copyuser_seg" movsb\n"
22783+ "100:\n"
22784+ ".section .fixup,\"ax\"\n"
22785+ "101: lea 0(%%eax,%0,4),%0\n"
22786+ " jmp 100b\n"
22787+ ".previous\n"
22788+ ".section __ex_table,\"a\"\n"
22789+ " .align 4\n"
22790+ " .long 1b,100b\n"
22791+ " .long 2b,100b\n"
22792+ " .long 3b,100b\n"
22793+ " .long 4b,100b\n"
22794+ " .long 5b,100b\n"
22795+ " .long 6b,100b\n"
22796+ " .long 7b,100b\n"
22797+ " .long 8b,100b\n"
22798+ " .long 9b,100b\n"
22799+ " .long 10b,100b\n"
22800+ " .long 11b,100b\n"
22801+ " .long 12b,100b\n"
22802+ " .long 13b,100b\n"
22803+ " .long 14b,100b\n"
22804+ " .long 15b,100b\n"
22805+ " .long 16b,100b\n"
22806+ " .long 17b,100b\n"
22807+ " .long 18b,100b\n"
22808+ " .long 19b,100b\n"
22809+ " .long 20b,100b\n"
22810+ " .long 21b,100b\n"
22811+ " .long 22b,100b\n"
22812+ " .long 23b,100b\n"
22813+ " .long 24b,100b\n"
22814+ " .long 25b,100b\n"
22815+ " .long 26b,100b\n"
22816+ " .long 27b,100b\n"
22817+ " .long 28b,100b\n"
22818+ " .long 29b,100b\n"
22819+ " .long 30b,100b\n"
22820+ " .long 31b,100b\n"
22821+ " .long 32b,100b\n"
22822+ " .long 33b,100b\n"
22823+ " .long 34b,100b\n"
22824+ " .long 35b,100b\n"
22825+ " .long 36b,100b\n"
22826+ " .long 37b,100b\n"
22827+ " .long 99b,101b\n"
22828+ ".previous"
22829+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
22830+ : "1"(to), "2"(from), "0"(size)
22831+ : "eax", "edx", "memory");
22832+ return size;
22833+}
22834+
22835+static unsigned long
22836+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
22837+static unsigned long
22838 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22839 {
22840 int d0, d1;
22841 __asm__ __volatile__(
22842 " .align 2,0x90\n"
22843- "0: movl 32(%4), %%eax\n"
22844+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22845 " cmpl $67, %0\n"
22846 " jbe 2f\n"
22847- "1: movl 64(%4), %%eax\n"
22848+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22849 " .align 2,0x90\n"
22850- "2: movl 0(%4), %%eax\n"
22851- "21: movl 4(%4), %%edx\n"
22852+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22853+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22854 " movl %%eax, 0(%3)\n"
22855 " movl %%edx, 4(%3)\n"
22856- "3: movl 8(%4), %%eax\n"
22857- "31: movl 12(%4),%%edx\n"
22858+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22859+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22860 " movl %%eax, 8(%3)\n"
22861 " movl %%edx, 12(%3)\n"
22862- "4: movl 16(%4), %%eax\n"
22863- "41: movl 20(%4), %%edx\n"
22864+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22865+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22866 " movl %%eax, 16(%3)\n"
22867 " movl %%edx, 20(%3)\n"
22868- "10: movl 24(%4), %%eax\n"
22869- "51: movl 28(%4), %%edx\n"
22870+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22871+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22872 " movl %%eax, 24(%3)\n"
22873 " movl %%edx, 28(%3)\n"
22874- "11: movl 32(%4), %%eax\n"
22875- "61: movl 36(%4), %%edx\n"
22876+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22877+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22878 " movl %%eax, 32(%3)\n"
22879 " movl %%edx, 36(%3)\n"
22880- "12: movl 40(%4), %%eax\n"
22881- "71: movl 44(%4), %%edx\n"
22882+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22883+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22884 " movl %%eax, 40(%3)\n"
22885 " movl %%edx, 44(%3)\n"
22886- "13: movl 48(%4), %%eax\n"
22887- "81: movl 52(%4), %%edx\n"
22888+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22889+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22890 " movl %%eax, 48(%3)\n"
22891 " movl %%edx, 52(%3)\n"
22892- "14: movl 56(%4), %%eax\n"
22893- "91: movl 60(%4), %%edx\n"
22894+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22895+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22896 " movl %%eax, 56(%3)\n"
22897 " movl %%edx, 60(%3)\n"
22898 " addl $-64, %0\n"
22899@@ -385,9 +500,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22900 " shrl $2, %0\n"
22901 " andl $3, %%eax\n"
22902 " cld\n"
22903- "6: rep; movsl\n"
22904+ "6: rep; "__copyuser_seg" movsl\n"
22905 " movl %%eax,%0\n"
22906- "7: rep; movsb\n"
22907+ "7: rep; "__copyuser_seg" movsb\n"
22908 "8:\n"
22909 ".section .fixup,\"ax\"\n"
22910 "9: lea 0(%%eax,%0,4),%0\n"
22911@@ -434,47 +549,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
22912 */
22913
22914 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22915+ const void __user *from, unsigned long size) __size_overflow(3);
22916+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22917 const void __user *from, unsigned long size)
22918 {
22919 int d0, d1;
22920
22921 __asm__ __volatile__(
22922 " .align 2,0x90\n"
22923- "0: movl 32(%4), %%eax\n"
22924+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
22925 " cmpl $67, %0\n"
22926 " jbe 2f\n"
22927- "1: movl 64(%4), %%eax\n"
22928+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
22929 " .align 2,0x90\n"
22930- "2: movl 0(%4), %%eax\n"
22931- "21: movl 4(%4), %%edx\n"
22932+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
22933+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
22934 " movnti %%eax, 0(%3)\n"
22935 " movnti %%edx, 4(%3)\n"
22936- "3: movl 8(%4), %%eax\n"
22937- "31: movl 12(%4),%%edx\n"
22938+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
22939+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
22940 " movnti %%eax, 8(%3)\n"
22941 " movnti %%edx, 12(%3)\n"
22942- "4: movl 16(%4), %%eax\n"
22943- "41: movl 20(%4), %%edx\n"
22944+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
22945+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
22946 " movnti %%eax, 16(%3)\n"
22947 " movnti %%edx, 20(%3)\n"
22948- "10: movl 24(%4), %%eax\n"
22949- "51: movl 28(%4), %%edx\n"
22950+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
22951+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
22952 " movnti %%eax, 24(%3)\n"
22953 " movnti %%edx, 28(%3)\n"
22954- "11: movl 32(%4), %%eax\n"
22955- "61: movl 36(%4), %%edx\n"
22956+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
22957+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
22958 " movnti %%eax, 32(%3)\n"
22959 " movnti %%edx, 36(%3)\n"
22960- "12: movl 40(%4), %%eax\n"
22961- "71: movl 44(%4), %%edx\n"
22962+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
22963+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
22964 " movnti %%eax, 40(%3)\n"
22965 " movnti %%edx, 44(%3)\n"
22966- "13: movl 48(%4), %%eax\n"
22967- "81: movl 52(%4), %%edx\n"
22968+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
22969+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
22970 " movnti %%eax, 48(%3)\n"
22971 " movnti %%edx, 52(%3)\n"
22972- "14: movl 56(%4), %%eax\n"
22973- "91: movl 60(%4), %%edx\n"
22974+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
22975+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
22976 " movnti %%eax, 56(%3)\n"
22977 " movnti %%edx, 60(%3)\n"
22978 " addl $-64, %0\n"
22979@@ -487,9 +604,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22980 " shrl $2, %0\n"
22981 " andl $3, %%eax\n"
22982 " cld\n"
22983- "6: rep; movsl\n"
22984+ "6: rep; "__copyuser_seg" movsl\n"
22985 " movl %%eax,%0\n"
22986- "7: rep; movsb\n"
22987+ "7: rep; "__copyuser_seg" movsb\n"
22988 "8:\n"
22989 ".section .fixup,\"ax\"\n"
22990 "9: lea 0(%%eax,%0,4),%0\n"
22991@@ -531,47 +648,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
22992 }
22993
22994 static unsigned long __copy_user_intel_nocache(void *to,
22995+ const void __user *from, unsigned long size) __size_overflow(3);
22996+static unsigned long __copy_user_intel_nocache(void *to,
22997 const void __user *from, unsigned long size)
22998 {
22999 int d0, d1;
23000
23001 __asm__ __volatile__(
23002 " .align 2,0x90\n"
23003- "0: movl 32(%4), %%eax\n"
23004+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23005 " cmpl $67, %0\n"
23006 " jbe 2f\n"
23007- "1: movl 64(%4), %%eax\n"
23008+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23009 " .align 2,0x90\n"
23010- "2: movl 0(%4), %%eax\n"
23011- "21: movl 4(%4), %%edx\n"
23012+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23013+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23014 " movnti %%eax, 0(%3)\n"
23015 " movnti %%edx, 4(%3)\n"
23016- "3: movl 8(%4), %%eax\n"
23017- "31: movl 12(%4),%%edx\n"
23018+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23019+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23020 " movnti %%eax, 8(%3)\n"
23021 " movnti %%edx, 12(%3)\n"
23022- "4: movl 16(%4), %%eax\n"
23023- "41: movl 20(%4), %%edx\n"
23024+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23025+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23026 " movnti %%eax, 16(%3)\n"
23027 " movnti %%edx, 20(%3)\n"
23028- "10: movl 24(%4), %%eax\n"
23029- "51: movl 28(%4), %%edx\n"
23030+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23031+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23032 " movnti %%eax, 24(%3)\n"
23033 " movnti %%edx, 28(%3)\n"
23034- "11: movl 32(%4), %%eax\n"
23035- "61: movl 36(%4), %%edx\n"
23036+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23037+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23038 " movnti %%eax, 32(%3)\n"
23039 " movnti %%edx, 36(%3)\n"
23040- "12: movl 40(%4), %%eax\n"
23041- "71: movl 44(%4), %%edx\n"
23042+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23043+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23044 " movnti %%eax, 40(%3)\n"
23045 " movnti %%edx, 44(%3)\n"
23046- "13: movl 48(%4), %%eax\n"
23047- "81: movl 52(%4), %%edx\n"
23048+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23049+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23050 " movnti %%eax, 48(%3)\n"
23051 " movnti %%edx, 52(%3)\n"
23052- "14: movl 56(%4), %%eax\n"
23053- "91: movl 60(%4), %%edx\n"
23054+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23055+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23056 " movnti %%eax, 56(%3)\n"
23057 " movnti %%edx, 60(%3)\n"
23058 " addl $-64, %0\n"
23059@@ -584,9 +703,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23060 " shrl $2, %0\n"
23061 " andl $3, %%eax\n"
23062 " cld\n"
23063- "6: rep; movsl\n"
23064+ "6: rep; "__copyuser_seg" movsl\n"
23065 " movl %%eax,%0\n"
23066- "7: rep; movsb\n"
23067+ "7: rep; "__copyuser_seg" movsb\n"
23068 "8:\n"
23069 ".section .fixup,\"ax\"\n"
23070 "9: lea 0(%%eax,%0,4),%0\n"
23071@@ -629,32 +748,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23072 */
23073 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23074 unsigned long size);
23075-unsigned long __copy_user_intel(void __user *to, const void *from,
23076+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23077+ unsigned long size);
23078+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23079 unsigned long size);
23080 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23081 const void __user *from, unsigned long size);
23082 #endif /* CONFIG_X86_INTEL_USERCOPY */
23083
23084 /* Generic arbitrary sized copy. */
23085-#define __copy_user(to, from, size) \
23086+#define __copy_user(to, from, size, prefix, set, restore) \
23087 do { \
23088 int __d0, __d1, __d2; \
23089 __asm__ __volatile__( \
23090+ set \
23091 " cmp $7,%0\n" \
23092 " jbe 1f\n" \
23093 " movl %1,%0\n" \
23094 " negl %0\n" \
23095 " andl $7,%0\n" \
23096 " subl %0,%3\n" \
23097- "4: rep; movsb\n" \
23098+ "4: rep; "prefix"movsb\n" \
23099 " movl %3,%0\n" \
23100 " shrl $2,%0\n" \
23101 " andl $3,%3\n" \
23102 " .align 2,0x90\n" \
23103- "0: rep; movsl\n" \
23104+ "0: rep; "prefix"movsl\n" \
23105 " movl %3,%0\n" \
23106- "1: rep; movsb\n" \
23107+ "1: rep; "prefix"movsb\n" \
23108 "2:\n" \
23109+ restore \
23110 ".section .fixup,\"ax\"\n" \
23111 "5: addl %3,%0\n" \
23112 " jmp 2b\n" \
23113@@ -682,14 +805,14 @@ do { \
23114 " negl %0\n" \
23115 " andl $7,%0\n" \
23116 " subl %0,%3\n" \
23117- "4: rep; movsb\n" \
23118+ "4: rep; "__copyuser_seg"movsb\n" \
23119 " movl %3,%0\n" \
23120 " shrl $2,%0\n" \
23121 " andl $3,%3\n" \
23122 " .align 2,0x90\n" \
23123- "0: rep; movsl\n" \
23124+ "0: rep; "__copyuser_seg"movsl\n" \
23125 " movl %3,%0\n" \
23126- "1: rep; movsb\n" \
23127+ "1: rep; "__copyuser_seg"movsb\n" \
23128 "2:\n" \
23129 ".section .fixup,\"ax\"\n" \
23130 "5: addl %3,%0\n" \
23131@@ -775,9 +898,9 @@ survive:
23132 }
23133 #endif
23134 if (movsl_is_ok(to, from, n))
23135- __copy_user(to, from, n);
23136+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23137 else
23138- n = __copy_user_intel(to, from, n);
23139+ n = __generic_copy_to_user_intel(to, from, n);
23140 return n;
23141 }
23142 EXPORT_SYMBOL(__copy_to_user_ll);
23143@@ -797,10 +920,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23144 unsigned long n)
23145 {
23146 if (movsl_is_ok(to, from, n))
23147- __copy_user(to, from, n);
23148+ __copy_user(to, from, n, __copyuser_seg, "", "");
23149 else
23150- n = __copy_user_intel((void __user *)to,
23151- (const void *)from, n);
23152+ n = __generic_copy_from_user_intel(to, from, n);
23153 return n;
23154 }
23155 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23156@@ -827,65 +949,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23157 if (n > 64 && cpu_has_xmm2)
23158 n = __copy_user_intel_nocache(to, from, n);
23159 else
23160- __copy_user(to, from, n);
23161+ __copy_user(to, from, n, __copyuser_seg, "", "");
23162 #else
23163- __copy_user(to, from, n);
23164+ __copy_user(to, from, n, __copyuser_seg, "", "");
23165 #endif
23166 return n;
23167 }
23168 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23169
23170-/**
23171- * copy_to_user: - Copy a block of data into user space.
23172- * @to: Destination address, in user space.
23173- * @from: Source address, in kernel space.
23174- * @n: Number of bytes to copy.
23175- *
23176- * Context: User context only. This function may sleep.
23177- *
23178- * Copy data from kernel space to user space.
23179- *
23180- * Returns number of bytes that could not be copied.
23181- * On success, this will be zero.
23182- */
23183-unsigned long
23184-copy_to_user(void __user *to, const void *from, unsigned long n)
23185-{
23186- if (access_ok(VERIFY_WRITE, to, n))
23187- n = __copy_to_user(to, from, n);
23188- return n;
23189-}
23190-EXPORT_SYMBOL(copy_to_user);
23191-
23192-/**
23193- * copy_from_user: - Copy a block of data from user space.
23194- * @to: Destination address, in kernel space.
23195- * @from: Source address, in user space.
23196- * @n: Number of bytes to copy.
23197- *
23198- * Context: User context only. This function may sleep.
23199- *
23200- * Copy data from user space to kernel space.
23201- *
23202- * Returns number of bytes that could not be copied.
23203- * On success, this will be zero.
23204- *
23205- * If some data could not be copied, this function will pad the copied
23206- * data to the requested size using zero bytes.
23207- */
23208-unsigned long
23209-_copy_from_user(void *to, const void __user *from, unsigned long n)
23210-{
23211- if (access_ok(VERIFY_READ, from, n))
23212- n = __copy_from_user(to, from, n);
23213- else
23214- memset(to, 0, n);
23215- return n;
23216-}
23217-EXPORT_SYMBOL(_copy_from_user);
23218-
23219 void copy_from_user_overflow(void)
23220 {
23221 WARN(1, "Buffer overflow detected!\n");
23222 }
23223 EXPORT_SYMBOL(copy_from_user_overflow);
23224+
23225+void copy_to_user_overflow(void)
23226+{
23227+ WARN(1, "Buffer overflow detected!\n");
23228+}
23229+EXPORT_SYMBOL(copy_to_user_overflow);
23230+
23231+#ifdef CONFIG_PAX_MEMORY_UDEREF
23232+void __set_fs(mm_segment_t x)
23233+{
23234+ switch (x.seg) {
23235+ case 0:
23236+ loadsegment(gs, 0);
23237+ break;
23238+ case TASK_SIZE_MAX:
23239+ loadsegment(gs, __USER_DS);
23240+ break;
23241+ case -1UL:
23242+ loadsegment(gs, __KERNEL_DS);
23243+ break;
23244+ default:
23245+ BUG();
23246+ }
23247+ return;
23248+}
23249+EXPORT_SYMBOL(__set_fs);
23250+
23251+void set_fs(mm_segment_t x)
23252+{
23253+ current_thread_info()->addr_limit = x;
23254+ __set_fs(x);
23255+}
23256+EXPORT_SYMBOL(set_fs);
23257+#endif
23258diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23259index b7c2849..8633ad8 100644
23260--- a/arch/x86/lib/usercopy_64.c
23261+++ b/arch/x86/lib/usercopy_64.c
23262@@ -42,6 +42,12 @@ long
23263 __strncpy_from_user(char *dst, const char __user *src, long count)
23264 {
23265 long res;
23266+
23267+#ifdef CONFIG_PAX_MEMORY_UDEREF
23268+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23269+ src += PAX_USER_SHADOW_BASE;
23270+#endif
23271+
23272 __do_strncpy_from_user(dst, src, count, res);
23273 return res;
23274 }
23275@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23276 {
23277 long __d0;
23278 might_fault();
23279+
23280+#ifdef CONFIG_PAX_MEMORY_UDEREF
23281+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23282+ addr += PAX_USER_SHADOW_BASE;
23283+#endif
23284+
23285 /* no memory constraint because it doesn't change any memory gcc knows
23286 about */
23287 asm volatile(
23288@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23289 }
23290 EXPORT_SYMBOL(strlen_user);
23291
23292-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23293+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23294 {
23295- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23296- return copy_user_generic((__force void *)to, (__force void *)from, len);
23297- }
23298- return len;
23299+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23300+
23301+#ifdef CONFIG_PAX_MEMORY_UDEREF
23302+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23303+ to += PAX_USER_SHADOW_BASE;
23304+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23305+ from += PAX_USER_SHADOW_BASE;
23306+#endif
23307+
23308+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23309+ }
23310+ return len;
23311 }
23312 EXPORT_SYMBOL(copy_in_user);
23313
23314@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23315 * it is not necessary to optimize tail handling.
23316 */
23317 unsigned long
23318-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23319+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23320 {
23321 char c;
23322 unsigned zero_len;
23323diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23324index 1fb85db..8b3540b 100644
23325--- a/arch/x86/mm/extable.c
23326+++ b/arch/x86/mm/extable.c
23327@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
23328 const struct exception_table_entry *fixup;
23329
23330 #ifdef CONFIG_PNPBIOS
23331- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23332+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23333 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23334 extern u32 pnp_bios_is_utter_crap;
23335 pnp_bios_is_utter_crap = 1;
23336diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23337index f0b4caf..d92fd42 100644
23338--- a/arch/x86/mm/fault.c
23339+++ b/arch/x86/mm/fault.c
23340@@ -13,11 +13,18 @@
23341 #include <linux/perf_event.h> /* perf_sw_event */
23342 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23343 #include <linux/prefetch.h> /* prefetchw */
23344+#include <linux/unistd.h>
23345+#include <linux/compiler.h>
23346
23347 #include <asm/traps.h> /* dotraplinkage, ... */
23348 #include <asm/pgalloc.h> /* pgd_*(), ... */
23349 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23350 #include <asm/fixmap.h> /* VSYSCALL_START */
23351+#include <asm/tlbflush.h>
23352+
23353+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23354+#include <asm/stacktrace.h>
23355+#endif
23356
23357 /*
23358 * Page fault error code bits:
23359@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23360 int ret = 0;
23361
23362 /* kprobe_running() needs smp_processor_id() */
23363- if (kprobes_built_in() && !user_mode_vm(regs)) {
23364+ if (kprobes_built_in() && !user_mode(regs)) {
23365 preempt_disable();
23366 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23367 ret = 1;
23368@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23369 return !instr_lo || (instr_lo>>1) == 1;
23370 case 0x00:
23371 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23372- if (probe_kernel_address(instr, opcode))
23373+ if (user_mode(regs)) {
23374+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23375+ return 0;
23376+ } else if (probe_kernel_address(instr, opcode))
23377 return 0;
23378
23379 *prefetch = (instr_lo == 0xF) &&
23380@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23381 while (instr < max_instr) {
23382 unsigned char opcode;
23383
23384- if (probe_kernel_address(instr, opcode))
23385+ if (user_mode(regs)) {
23386+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23387+ break;
23388+ } else if (probe_kernel_address(instr, opcode))
23389 break;
23390
23391 instr++;
23392@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23393 force_sig_info(si_signo, &info, tsk);
23394 }
23395
23396+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23397+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23398+#endif
23399+
23400+#ifdef CONFIG_PAX_EMUTRAMP
23401+static int pax_handle_fetch_fault(struct pt_regs *regs);
23402+#endif
23403+
23404+#ifdef CONFIG_PAX_PAGEEXEC
23405+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23406+{
23407+ pgd_t *pgd;
23408+ pud_t *pud;
23409+ pmd_t *pmd;
23410+
23411+ pgd = pgd_offset(mm, address);
23412+ if (!pgd_present(*pgd))
23413+ return NULL;
23414+ pud = pud_offset(pgd, address);
23415+ if (!pud_present(*pud))
23416+ return NULL;
23417+ pmd = pmd_offset(pud, address);
23418+ if (!pmd_present(*pmd))
23419+ return NULL;
23420+ return pmd;
23421+}
23422+#endif
23423+
23424 DEFINE_SPINLOCK(pgd_lock);
23425 LIST_HEAD(pgd_list);
23426
23427@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23428 for (address = VMALLOC_START & PMD_MASK;
23429 address >= TASK_SIZE && address < FIXADDR_TOP;
23430 address += PMD_SIZE) {
23431+
23432+#ifdef CONFIG_PAX_PER_CPU_PGD
23433+ unsigned long cpu;
23434+#else
23435 struct page *page;
23436+#endif
23437
23438 spin_lock(&pgd_lock);
23439+
23440+#ifdef CONFIG_PAX_PER_CPU_PGD
23441+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23442+ pgd_t *pgd = get_cpu_pgd(cpu);
23443+ pmd_t *ret;
23444+#else
23445 list_for_each_entry(page, &pgd_list, lru) {
23446+ pgd_t *pgd = page_address(page);
23447 spinlock_t *pgt_lock;
23448 pmd_t *ret;
23449
23450@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23451 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23452
23453 spin_lock(pgt_lock);
23454- ret = vmalloc_sync_one(page_address(page), address);
23455+#endif
23456+
23457+ ret = vmalloc_sync_one(pgd, address);
23458+
23459+#ifndef CONFIG_PAX_PER_CPU_PGD
23460 spin_unlock(pgt_lock);
23461+#endif
23462
23463 if (!ret)
23464 break;
23465@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23466 * an interrupt in the middle of a task switch..
23467 */
23468 pgd_paddr = read_cr3();
23469+
23470+#ifdef CONFIG_PAX_PER_CPU_PGD
23471+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23472+#endif
23473+
23474 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23475 if (!pmd_k)
23476 return -1;
23477@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23478 * happen within a race in page table update. In the later
23479 * case just flush:
23480 */
23481+
23482+#ifdef CONFIG_PAX_PER_CPU_PGD
23483+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23484+ pgd = pgd_offset_cpu(smp_processor_id(), address);
23485+#else
23486 pgd = pgd_offset(current->active_mm, address);
23487+#endif
23488+
23489 pgd_ref = pgd_offset_k(address);
23490 if (pgd_none(*pgd_ref))
23491 return -1;
23492@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23493 static int is_errata100(struct pt_regs *regs, unsigned long address)
23494 {
23495 #ifdef CONFIG_X86_64
23496- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23497+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23498 return 1;
23499 #endif
23500 return 0;
23501@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23502 }
23503
23504 static const char nx_warning[] = KERN_CRIT
23505-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23506+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23507
23508 static void
23509 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23510@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23511 if (!oops_may_print())
23512 return;
23513
23514- if (error_code & PF_INSTR) {
23515+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23516 unsigned int level;
23517
23518 pte_t *pte = lookup_address(address, &level);
23519
23520 if (pte && pte_present(*pte) && !pte_exec(*pte))
23521- printk(nx_warning, current_uid());
23522+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23523 }
23524
23525+#ifdef CONFIG_PAX_KERNEXEC
23526+ if (init_mm.start_code <= address && address < init_mm.end_code) {
23527+ if (current->signal->curr_ip)
23528+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23529+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23530+ else
23531+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23532+ current->comm, task_pid_nr(current), current_uid(), current_euid());
23533+ }
23534+#endif
23535+
23536 printk(KERN_ALERT "BUG: unable to handle kernel ");
23537 if (address < PAGE_SIZE)
23538 printk(KERN_CONT "NULL pointer dereference");
23539@@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23540 }
23541 #endif
23542
23543+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23544+ if (pax_is_fetch_fault(regs, error_code, address)) {
23545+
23546+#ifdef CONFIG_PAX_EMUTRAMP
23547+ switch (pax_handle_fetch_fault(regs)) {
23548+ case 2:
23549+ return;
23550+ }
23551+#endif
23552+
23553+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23554+ do_group_exit(SIGKILL);
23555+ }
23556+#endif
23557+
23558 if (unlikely(show_unhandled_signals))
23559 show_signal_msg(regs, error_code, address, tsk);
23560
23561@@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23562 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23563 printk(KERN_ERR
23564 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23565- tsk->comm, tsk->pid, address);
23566+ tsk->comm, task_pid_nr(tsk), address);
23567 code = BUS_MCEERR_AR;
23568 }
23569 #endif
23570@@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23571 return 1;
23572 }
23573
23574+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23575+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23576+{
23577+ pte_t *pte;
23578+ pmd_t *pmd;
23579+ spinlock_t *ptl;
23580+ unsigned char pte_mask;
23581+
23582+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23583+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
23584+ return 0;
23585+
23586+ /* PaX: it's our fault, let's handle it if we can */
23587+
23588+ /* PaX: take a look at read faults before acquiring any locks */
23589+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23590+ /* instruction fetch attempt from a protected page in user mode */
23591+ up_read(&mm->mmap_sem);
23592+
23593+#ifdef CONFIG_PAX_EMUTRAMP
23594+ switch (pax_handle_fetch_fault(regs)) {
23595+ case 2:
23596+ return 1;
23597+ }
23598+#endif
23599+
23600+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23601+ do_group_exit(SIGKILL);
23602+ }
23603+
23604+ pmd = pax_get_pmd(mm, address);
23605+ if (unlikely(!pmd))
23606+ return 0;
23607+
23608+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23609+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23610+ pte_unmap_unlock(pte, ptl);
23611+ return 0;
23612+ }
23613+
23614+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23615+ /* write attempt to a protected page in user mode */
23616+ pte_unmap_unlock(pte, ptl);
23617+ return 0;
23618+ }
23619+
23620+#ifdef CONFIG_SMP
23621+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23622+#else
23623+ if (likely(address > get_limit(regs->cs)))
23624+#endif
23625+ {
23626+ set_pte(pte, pte_mkread(*pte));
23627+ __flush_tlb_one(address);
23628+ pte_unmap_unlock(pte, ptl);
23629+ up_read(&mm->mmap_sem);
23630+ return 1;
23631+ }
23632+
23633+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23634+
23635+ /*
23636+ * PaX: fill DTLB with user rights and retry
23637+ */
23638+ __asm__ __volatile__ (
23639+ "orb %2,(%1)\n"
23640+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23641+/*
23642+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23643+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23644+ * page fault when examined during a TLB load attempt. this is true not only
23645+ * for PTEs holding a non-present entry but also present entries that will
23646+ * raise a page fault (such as those set up by PaX, or the copy-on-write
23647+ * mechanism). in effect it means that we do *not* need to flush the TLBs
23648+ * for our target pages since their PTEs are simply not in the TLBs at all.
23649+
23650+ * the best thing in omitting it is that we gain around 15-20% speed in the
23651+ * fast path of the page fault handler and can get rid of tracing since we
23652+ * can no longer flush unintended entries.
23653+ */
23654+ "invlpg (%0)\n"
23655+#endif
23656+ __copyuser_seg"testb $0,(%0)\n"
23657+ "xorb %3,(%1)\n"
23658+ :
23659+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23660+ : "memory", "cc");
23661+ pte_unmap_unlock(pte, ptl);
23662+ up_read(&mm->mmap_sem);
23663+ return 1;
23664+}
23665+#endif
23666+
23667 /*
23668 * Handle a spurious fault caused by a stale TLB entry.
23669 *
23670@@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23671 static inline int
23672 access_error(unsigned long error_code, struct vm_area_struct *vma)
23673 {
23674+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23675+ return 1;
23676+
23677 if (error_code & PF_WRITE) {
23678 /* write, present and write, not present: */
23679 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23680@@ -1005,18 +1197,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23681 {
23682 struct vm_area_struct *vma;
23683 struct task_struct *tsk;
23684- unsigned long address;
23685 struct mm_struct *mm;
23686 int fault;
23687 int write = error_code & PF_WRITE;
23688 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23689 (write ? FAULT_FLAG_WRITE : 0);
23690
23691- tsk = current;
23692- mm = tsk->mm;
23693-
23694 /* Get the faulting address: */
23695- address = read_cr2();
23696+ unsigned long address = read_cr2();
23697+
23698+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23699+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23700+ if (!search_exception_tables(regs->ip)) {
23701+ bad_area_nosemaphore(regs, error_code, address);
23702+ return;
23703+ }
23704+ if (address < PAX_USER_SHADOW_BASE) {
23705+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23706+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23707+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23708+ } else
23709+ address -= PAX_USER_SHADOW_BASE;
23710+ }
23711+#endif
23712+
23713+ tsk = current;
23714+ mm = tsk->mm;
23715
23716 /*
23717 * Detect and handle instructions that would cause a page fault for
23718@@ -1077,7 +1283,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23719 * User-mode registers count as a user access even for any
23720 * potential system fault or CPU buglet:
23721 */
23722- if (user_mode_vm(regs)) {
23723+ if (user_mode(regs)) {
23724 local_irq_enable();
23725 error_code |= PF_USER;
23726 } else {
23727@@ -1132,6 +1338,11 @@ retry:
23728 might_sleep();
23729 }
23730
23731+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23732+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
23733+ return;
23734+#endif
23735+
23736 vma = find_vma(mm, address);
23737 if (unlikely(!vma)) {
23738 bad_area(regs, error_code, address);
23739@@ -1143,18 +1354,24 @@ retry:
23740 bad_area(regs, error_code, address);
23741 return;
23742 }
23743- if (error_code & PF_USER) {
23744- /*
23745- * Accessing the stack below %sp is always a bug.
23746- * The large cushion allows instructions like enter
23747- * and pusha to work. ("enter $65535, $31" pushes
23748- * 32 pointers and then decrements %sp by 65535.)
23749- */
23750- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
23751- bad_area(regs, error_code, address);
23752- return;
23753- }
23754+ /*
23755+ * Accessing the stack below %sp is always a bug.
23756+ * The large cushion allows instructions like enter
23757+ * and pusha to work. ("enter $65535, $31" pushes
23758+ * 32 pointers and then decrements %sp by 65535.)
23759+ */
23760+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
23761+ bad_area(regs, error_code, address);
23762+ return;
23763 }
23764+
23765+#ifdef CONFIG_PAX_SEGMEXEC
23766+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
23767+ bad_area(regs, error_code, address);
23768+ return;
23769+ }
23770+#endif
23771+
23772 if (unlikely(expand_stack(vma, address))) {
23773 bad_area(regs, error_code, address);
23774 return;
23775@@ -1209,3 +1426,292 @@ good_area:
23776
23777 up_read(&mm->mmap_sem);
23778 }
23779+
23780+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23781+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
23782+{
23783+ struct mm_struct *mm = current->mm;
23784+ unsigned long ip = regs->ip;
23785+
23786+ if (v8086_mode(regs))
23787+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
23788+
23789+#ifdef CONFIG_PAX_PAGEEXEC
23790+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
23791+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
23792+ return true;
23793+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
23794+ return true;
23795+ return false;
23796+ }
23797+#endif
23798+
23799+#ifdef CONFIG_PAX_SEGMEXEC
23800+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
23801+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
23802+ return true;
23803+ return false;
23804+ }
23805+#endif
23806+
23807+ return false;
23808+}
23809+#endif
23810+
23811+#ifdef CONFIG_PAX_EMUTRAMP
23812+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
23813+{
23814+ int err;
23815+
23816+ do { /* PaX: libffi trampoline emulation */
23817+ unsigned char mov, jmp;
23818+ unsigned int addr1, addr2;
23819+
23820+#ifdef CONFIG_X86_64
23821+ if ((regs->ip + 9) >> 32)
23822+ break;
23823+#endif
23824+
23825+ err = get_user(mov, (unsigned char __user *)regs->ip);
23826+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23827+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23828+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23829+
23830+ if (err)
23831+ break;
23832+
23833+ if (mov == 0xB8 && jmp == 0xE9) {
23834+ regs->ax = addr1;
23835+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23836+ return 2;
23837+ }
23838+ } while (0);
23839+
23840+ do { /* PaX: gcc trampoline emulation #1 */
23841+ unsigned char mov1, mov2;
23842+ unsigned short jmp;
23843+ unsigned int addr1, addr2;
23844+
23845+#ifdef CONFIG_X86_64
23846+ if ((regs->ip + 11) >> 32)
23847+ break;
23848+#endif
23849+
23850+ err = get_user(mov1, (unsigned char __user *)regs->ip);
23851+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23852+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
23853+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23854+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
23855+
23856+ if (err)
23857+ break;
23858+
23859+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
23860+ regs->cx = addr1;
23861+ regs->ax = addr2;
23862+ regs->ip = addr2;
23863+ return 2;
23864+ }
23865+ } while (0);
23866+
23867+ do { /* PaX: gcc trampoline emulation #2 */
23868+ unsigned char mov, jmp;
23869+ unsigned int addr1, addr2;
23870+
23871+#ifdef CONFIG_X86_64
23872+ if ((regs->ip + 9) >> 32)
23873+ break;
23874+#endif
23875+
23876+ err = get_user(mov, (unsigned char __user *)regs->ip);
23877+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
23878+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
23879+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
23880+
23881+ if (err)
23882+ break;
23883+
23884+ if (mov == 0xB9 && jmp == 0xE9) {
23885+ regs->cx = addr1;
23886+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
23887+ return 2;
23888+ }
23889+ } while (0);
23890+
23891+ return 1; /* PaX in action */
23892+}
23893+
23894+#ifdef CONFIG_X86_64
23895+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
23896+{
23897+ int err;
23898+
23899+ do { /* PaX: libffi trampoline emulation */
23900+ unsigned short mov1, mov2, jmp1;
23901+ unsigned char stcclc, jmp2;
23902+ unsigned long addr1, addr2;
23903+
23904+ err = get_user(mov1, (unsigned short __user *)regs->ip);
23905+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
23906+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
23907+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
23908+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
23909+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
23910+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
23911+
23912+ if (err)
23913+ break;
23914+
23915+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23916+ regs->r11 = addr1;
23917+ regs->r10 = addr2;
23918+ if (stcclc == 0xF8)
23919+ regs->flags &= ~X86_EFLAGS_CF;
23920+ else
23921+ regs->flags |= X86_EFLAGS_CF;
23922+ regs->ip = addr1;
23923+ return 2;
23924+ }
23925+ } while (0);
23926+
23927+ do { /* PaX: gcc trampoline emulation #1 */
23928+ unsigned short mov1, mov2, jmp1;
23929+ unsigned char jmp2;
23930+ unsigned int addr1;
23931+ unsigned long addr2;
23932+
23933+ err = get_user(mov1, (unsigned short __user *)regs->ip);
23934+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
23935+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
23936+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
23937+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
23938+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
23939+
23940+ if (err)
23941+ break;
23942+
23943+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23944+ regs->r11 = addr1;
23945+ regs->r10 = addr2;
23946+ regs->ip = addr1;
23947+ return 2;
23948+ }
23949+ } while (0);
23950+
23951+ do { /* PaX: gcc trampoline emulation #2 */
23952+ unsigned short mov1, mov2, jmp1;
23953+ unsigned char jmp2;
23954+ unsigned long addr1, addr2;
23955+
23956+ err = get_user(mov1, (unsigned short __user *)regs->ip);
23957+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
23958+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
23959+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
23960+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
23961+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
23962+
23963+ if (err)
23964+ break;
23965+
23966+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
23967+ regs->r11 = addr1;
23968+ regs->r10 = addr2;
23969+ regs->ip = addr1;
23970+ return 2;
23971+ }
23972+ } while (0);
23973+
23974+ return 1; /* PaX in action */
23975+}
23976+#endif
23977+
23978+/*
23979+ * PaX: decide what to do with offenders (regs->ip = fault address)
23980+ *
23981+ * returns 1 when task should be killed
23982+ * 2 when gcc trampoline was detected
23983+ */
23984+static int pax_handle_fetch_fault(struct pt_regs *regs)
23985+{
23986+ if (v8086_mode(regs))
23987+ return 1;
23988+
23989+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
23990+ return 1;
23991+
23992+#ifdef CONFIG_X86_32
23993+ return pax_handle_fetch_fault_32(regs);
23994+#else
23995+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
23996+ return pax_handle_fetch_fault_32(regs);
23997+ else
23998+ return pax_handle_fetch_fault_64(regs);
23999+#endif
24000+}
24001+#endif
24002+
24003+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24004+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24005+{
24006+ long i;
24007+
24008+ printk(KERN_ERR "PAX: bytes at PC: ");
24009+ for (i = 0; i < 20; i++) {
24010+ unsigned char c;
24011+ if (get_user(c, (unsigned char __force_user *)pc+i))
24012+ printk(KERN_CONT "?? ");
24013+ else
24014+ printk(KERN_CONT "%02x ", c);
24015+ }
24016+ printk("\n");
24017+
24018+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24019+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
24020+ unsigned long c;
24021+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
24022+#ifdef CONFIG_X86_32
24023+ printk(KERN_CONT "???????? ");
24024+#else
24025+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24026+ printk(KERN_CONT "???????? ???????? ");
24027+ else
24028+ printk(KERN_CONT "???????????????? ");
24029+#endif
24030+ } else {
24031+#ifdef CONFIG_X86_64
24032+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24033+ printk(KERN_CONT "%08x ", (unsigned int)c);
24034+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24035+ } else
24036+#endif
24037+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24038+ }
24039+ }
24040+ printk("\n");
24041+}
24042+#endif
24043+
24044+/**
24045+ * probe_kernel_write(): safely attempt to write to a location
24046+ * @dst: address to write to
24047+ * @src: pointer to the data that shall be written
24048+ * @size: size of the data chunk
24049+ *
24050+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24051+ * happens, handle that and return -EFAULT.
24052+ */
24053+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24054+{
24055+ long ret;
24056+ mm_segment_t old_fs = get_fs();
24057+
24058+ set_fs(KERNEL_DS);
24059+ pagefault_disable();
24060+ pax_open_kernel();
24061+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24062+ pax_close_kernel();
24063+ pagefault_enable();
24064+ set_fs(old_fs);
24065+
24066+ return ret ? -EFAULT : 0;
24067+}
24068diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24069index dd74e46..7d26398 100644
24070--- a/arch/x86/mm/gup.c
24071+++ b/arch/x86/mm/gup.c
24072@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24073 addr = start;
24074 len = (unsigned long) nr_pages << PAGE_SHIFT;
24075 end = start + len;
24076- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24077+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24078 (void __user *)start, len)))
24079 return 0;
24080
24081diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24082index f4f29b1..5cac4fb 100644
24083--- a/arch/x86/mm/highmem_32.c
24084+++ b/arch/x86/mm/highmem_32.c
24085@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24086 idx = type + KM_TYPE_NR*smp_processor_id();
24087 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24088 BUG_ON(!pte_none(*(kmap_pte-idx)));
24089+
24090+ pax_open_kernel();
24091 set_pte(kmap_pte-idx, mk_pte(page, prot));
24092+ pax_close_kernel();
24093+
24094 arch_flush_lazy_mmu_mode();
24095
24096 return (void *)vaddr;
24097diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24098index 8ecbb4b..29efd37 100644
24099--- a/arch/x86/mm/hugetlbpage.c
24100+++ b/arch/x86/mm/hugetlbpage.c
24101@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24102 struct hstate *h = hstate_file(file);
24103 struct mm_struct *mm = current->mm;
24104 struct vm_area_struct *vma;
24105- unsigned long start_addr;
24106+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24107+
24108+#ifdef CONFIG_PAX_SEGMEXEC
24109+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24110+ pax_task_size = SEGMEXEC_TASK_SIZE;
24111+#endif
24112+
24113+ pax_task_size -= PAGE_SIZE;
24114
24115 if (len > mm->cached_hole_size) {
24116- start_addr = mm->free_area_cache;
24117+ start_addr = mm->free_area_cache;
24118 } else {
24119- start_addr = TASK_UNMAPPED_BASE;
24120- mm->cached_hole_size = 0;
24121+ start_addr = mm->mmap_base;
24122+ mm->cached_hole_size = 0;
24123 }
24124
24125 full_search:
24126@@ -280,26 +287,27 @@ full_search:
24127
24128 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24129 /* At this point: (!vma || addr < vma->vm_end). */
24130- if (TASK_SIZE - len < addr) {
24131+ if (pax_task_size - len < addr) {
24132 /*
24133 * Start a new search - just in case we missed
24134 * some holes.
24135 */
24136- if (start_addr != TASK_UNMAPPED_BASE) {
24137- start_addr = TASK_UNMAPPED_BASE;
24138+ if (start_addr != mm->mmap_base) {
24139+ start_addr = mm->mmap_base;
24140 mm->cached_hole_size = 0;
24141 goto full_search;
24142 }
24143 return -ENOMEM;
24144 }
24145- if (!vma || addr + len <= vma->vm_start) {
24146- mm->free_area_cache = addr + len;
24147- return addr;
24148- }
24149+ if (check_heap_stack_gap(vma, addr, len))
24150+ break;
24151 if (addr + mm->cached_hole_size < vma->vm_start)
24152 mm->cached_hole_size = vma->vm_start - addr;
24153 addr = ALIGN(vma->vm_end, huge_page_size(h));
24154 }
24155+
24156+ mm->free_area_cache = addr + len;
24157+ return addr;
24158 }
24159
24160 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24161@@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24162 {
24163 struct hstate *h = hstate_file(file);
24164 struct mm_struct *mm = current->mm;
24165- struct vm_area_struct *vma, *prev_vma;
24166- unsigned long base = mm->mmap_base, addr = addr0;
24167+ struct vm_area_struct *vma;
24168+ unsigned long base = mm->mmap_base, addr;
24169 unsigned long largest_hole = mm->cached_hole_size;
24170- int first_time = 1;
24171
24172 /* don't allow allocations above current base */
24173 if (mm->free_area_cache > base)
24174@@ -321,66 +328,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24175 largest_hole = 0;
24176 mm->free_area_cache = base;
24177 }
24178-try_again:
24179+
24180 /* make sure it can fit in the remaining address space */
24181 if (mm->free_area_cache < len)
24182 goto fail;
24183
24184 /* either no address requested or can't fit in requested address hole */
24185- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24186+ addr = (mm->free_area_cache - len);
24187 do {
24188+ addr &= huge_page_mask(h);
24189+ vma = find_vma(mm, addr);
24190 /*
24191 * Lookup failure means no vma is above this address,
24192 * i.e. return with success:
24193- */
24194- vma = find_vma(mm, addr);
24195- if (!vma)
24196- return addr;
24197-
24198- /*
24199 * new region fits between prev_vma->vm_end and
24200 * vma->vm_start, use it:
24201 */
24202- prev_vma = vma->vm_prev;
24203- if (addr + len <= vma->vm_start &&
24204- (!prev_vma || (addr >= prev_vma->vm_end))) {
24205+ if (check_heap_stack_gap(vma, addr, len)) {
24206 /* remember the address as a hint for next time */
24207- mm->cached_hole_size = largest_hole;
24208- return (mm->free_area_cache = addr);
24209- } else {
24210- /* pull free_area_cache down to the first hole */
24211- if (mm->free_area_cache == vma->vm_end) {
24212- mm->free_area_cache = vma->vm_start;
24213- mm->cached_hole_size = largest_hole;
24214- }
24215+ mm->cached_hole_size = largest_hole;
24216+ return (mm->free_area_cache = addr);
24217+ }
24218+ /* pull free_area_cache down to the first hole */
24219+ if (mm->free_area_cache == vma->vm_end) {
24220+ mm->free_area_cache = vma->vm_start;
24221+ mm->cached_hole_size = largest_hole;
24222 }
24223
24224 /* remember the largest hole we saw so far */
24225 if (addr + largest_hole < vma->vm_start)
24226- largest_hole = vma->vm_start - addr;
24227+ largest_hole = vma->vm_start - addr;
24228
24229 /* try just below the current vma->vm_start */
24230- addr = (vma->vm_start - len) & huge_page_mask(h);
24231- } while (len <= vma->vm_start);
24232+ addr = skip_heap_stack_gap(vma, len);
24233+ } while (!IS_ERR_VALUE(addr));
24234
24235 fail:
24236 /*
24237- * if hint left us with no space for the requested
24238- * mapping then try again:
24239- */
24240- if (first_time) {
24241- mm->free_area_cache = base;
24242- largest_hole = 0;
24243- first_time = 0;
24244- goto try_again;
24245- }
24246- /*
24247 * A failed mmap() very likely causes application failure,
24248 * so fall back to the bottom-up function here. This scenario
24249 * can happen with large stack limits and large mmap()
24250 * allocations.
24251 */
24252- mm->free_area_cache = TASK_UNMAPPED_BASE;
24253+
24254+#ifdef CONFIG_PAX_SEGMEXEC
24255+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24256+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24257+ else
24258+#endif
24259+
24260+ mm->mmap_base = TASK_UNMAPPED_BASE;
24261+
24262+#ifdef CONFIG_PAX_RANDMMAP
24263+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24264+ mm->mmap_base += mm->delta_mmap;
24265+#endif
24266+
24267+ mm->free_area_cache = mm->mmap_base;
24268 mm->cached_hole_size = ~0UL;
24269 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24270 len, pgoff, flags);
24271@@ -388,6 +392,7 @@ fail:
24272 /*
24273 * Restore the topdown base:
24274 */
24275+ mm->mmap_base = base;
24276 mm->free_area_cache = base;
24277 mm->cached_hole_size = ~0UL;
24278
24279@@ -401,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24280 struct hstate *h = hstate_file(file);
24281 struct mm_struct *mm = current->mm;
24282 struct vm_area_struct *vma;
24283+ unsigned long pax_task_size = TASK_SIZE;
24284
24285 if (len & ~huge_page_mask(h))
24286 return -EINVAL;
24287- if (len > TASK_SIZE)
24288+
24289+#ifdef CONFIG_PAX_SEGMEXEC
24290+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24291+ pax_task_size = SEGMEXEC_TASK_SIZE;
24292+#endif
24293+
24294+ pax_task_size -= PAGE_SIZE;
24295+
24296+ if (len > pax_task_size)
24297 return -ENOMEM;
24298
24299 if (flags & MAP_FIXED) {
24300@@ -416,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24301 if (addr) {
24302 addr = ALIGN(addr, huge_page_size(h));
24303 vma = find_vma(mm, addr);
24304- if (TASK_SIZE - len >= addr &&
24305- (!vma || addr + len <= vma->vm_start))
24306+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24307 return addr;
24308 }
24309 if (mm->get_unmapped_area == arch_get_unmapped_area)
24310diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24311index 6cabf65..77e9c1c 100644
24312--- a/arch/x86/mm/init.c
24313+++ b/arch/x86/mm/init.c
24314@@ -17,6 +17,7 @@
24315 #include <asm/tlb.h>
24316 #include <asm/proto.h>
24317 #include <asm/dma.h> /* for MAX_DMA_PFN */
24318+#include <asm/desc.h>
24319
24320 unsigned long __initdata pgt_buf_start;
24321 unsigned long __meminitdata pgt_buf_end;
24322@@ -33,7 +34,7 @@ int direct_gbpages
24323 static void __init find_early_table_space(unsigned long end, int use_pse,
24324 int use_gbpages)
24325 {
24326- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24327+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24328 phys_addr_t base;
24329
24330 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24331@@ -314,8 +315,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24332 */
24333 int devmem_is_allowed(unsigned long pagenr)
24334 {
24335+#ifdef CONFIG_GRKERNSEC_KMEM
24336+ /* allow BDA */
24337+ if (!pagenr)
24338+ return 1;
24339+ /* allow EBDA */
24340+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
24341+ return 1;
24342+#else
24343+ if (!pagenr)
24344+ return 1;
24345+#ifdef CONFIG_VM86
24346+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24347+ return 1;
24348+#endif
24349+#endif
24350+
24351+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24352+ return 1;
24353+#ifdef CONFIG_GRKERNSEC_KMEM
24354+ /* throw out everything else below 1MB */
24355 if (pagenr <= 256)
24356- return 1;
24357+ return 0;
24358+#endif
24359 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24360 return 0;
24361 if (!page_is_ram(pagenr))
24362@@ -374,6 +396,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24363
24364 void free_initmem(void)
24365 {
24366+
24367+#ifdef CONFIG_PAX_KERNEXEC
24368+#ifdef CONFIG_X86_32
24369+ /* PaX: limit KERNEL_CS to actual size */
24370+ unsigned long addr, limit;
24371+ struct desc_struct d;
24372+ int cpu;
24373+
24374+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24375+ limit = (limit - 1UL) >> PAGE_SHIFT;
24376+
24377+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24378+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24379+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24380+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24381+ }
24382+
24383+ /* PaX: make KERNEL_CS read-only */
24384+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24385+ if (!paravirt_enabled())
24386+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24387+/*
24388+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24389+ pgd = pgd_offset_k(addr);
24390+ pud = pud_offset(pgd, addr);
24391+ pmd = pmd_offset(pud, addr);
24392+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24393+ }
24394+*/
24395+#ifdef CONFIG_X86_PAE
24396+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24397+/*
24398+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24399+ pgd = pgd_offset_k(addr);
24400+ pud = pud_offset(pgd, addr);
24401+ pmd = pmd_offset(pud, addr);
24402+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24403+ }
24404+*/
24405+#endif
24406+
24407+#ifdef CONFIG_MODULES
24408+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24409+#endif
24410+
24411+#else
24412+ pgd_t *pgd;
24413+ pud_t *pud;
24414+ pmd_t *pmd;
24415+ unsigned long addr, end;
24416+
24417+ /* PaX: make kernel code/rodata read-only, rest non-executable */
24418+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24419+ pgd = pgd_offset_k(addr);
24420+ pud = pud_offset(pgd, addr);
24421+ pmd = pmd_offset(pud, addr);
24422+ if (!pmd_present(*pmd))
24423+ continue;
24424+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24425+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24426+ else
24427+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24428+ }
24429+
24430+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24431+ end = addr + KERNEL_IMAGE_SIZE;
24432+ for (; addr < end; addr += PMD_SIZE) {
24433+ pgd = pgd_offset_k(addr);
24434+ pud = pud_offset(pgd, addr);
24435+ pmd = pmd_offset(pud, addr);
24436+ if (!pmd_present(*pmd))
24437+ continue;
24438+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24439+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24440+ }
24441+#endif
24442+
24443+ flush_tlb_all();
24444+#endif
24445+
24446 free_init_pages("unused kernel memory",
24447 (unsigned long)(&__init_begin),
24448 (unsigned long)(&__init_end));
24449diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24450index 8663f6c..829ae76 100644
24451--- a/arch/x86/mm/init_32.c
24452+++ b/arch/x86/mm/init_32.c
24453@@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
24454 }
24455
24456 /*
24457- * Creates a middle page table and puts a pointer to it in the
24458- * given global directory entry. This only returns the gd entry
24459- * in non-PAE compilation mode, since the middle layer is folded.
24460- */
24461-static pmd_t * __init one_md_table_init(pgd_t *pgd)
24462-{
24463- pud_t *pud;
24464- pmd_t *pmd_table;
24465-
24466-#ifdef CONFIG_X86_PAE
24467- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24468- if (after_bootmem)
24469- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24470- else
24471- pmd_table = (pmd_t *)alloc_low_page();
24472- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24473- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24474- pud = pud_offset(pgd, 0);
24475- BUG_ON(pmd_table != pmd_offset(pud, 0));
24476-
24477- return pmd_table;
24478- }
24479-#endif
24480- pud = pud_offset(pgd, 0);
24481- pmd_table = pmd_offset(pud, 0);
24482-
24483- return pmd_table;
24484-}
24485-
24486-/*
24487 * Create a page table and place a pointer to it in a middle page
24488 * directory entry:
24489 */
24490@@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24491 page_table = (pte_t *)alloc_low_page();
24492
24493 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24494+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24495+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24496+#else
24497 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24498+#endif
24499 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24500 }
24501
24502 return pte_offset_kernel(pmd, 0);
24503 }
24504
24505+static pmd_t * __init one_md_table_init(pgd_t *pgd)
24506+{
24507+ pud_t *pud;
24508+ pmd_t *pmd_table;
24509+
24510+ pud = pud_offset(pgd, 0);
24511+ pmd_table = pmd_offset(pud, 0);
24512+
24513+ return pmd_table;
24514+}
24515+
24516 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24517 {
24518 int pgd_idx = pgd_index(vaddr);
24519@@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24520 int pgd_idx, pmd_idx;
24521 unsigned long vaddr;
24522 pgd_t *pgd;
24523+ pud_t *pud;
24524 pmd_t *pmd;
24525 pte_t *pte = NULL;
24526
24527@@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24528 pgd = pgd_base + pgd_idx;
24529
24530 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24531- pmd = one_md_table_init(pgd);
24532- pmd = pmd + pmd_index(vaddr);
24533+ pud = pud_offset(pgd, vaddr);
24534+ pmd = pmd_offset(pud, vaddr);
24535+
24536+#ifdef CONFIG_X86_PAE
24537+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24538+#endif
24539+
24540 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24541 pmd++, pmd_idx++) {
24542 pte = page_table_kmap_check(one_page_table_init(pmd),
24543@@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24544 }
24545 }
24546
24547-static inline int is_kernel_text(unsigned long addr)
24548+static inline int is_kernel_text(unsigned long start, unsigned long end)
24549 {
24550- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24551- return 1;
24552- return 0;
24553+ if ((start > ktla_ktva((unsigned long)_etext) ||
24554+ end <= ktla_ktva((unsigned long)_stext)) &&
24555+ (start > ktla_ktva((unsigned long)_einittext) ||
24556+ end <= ktla_ktva((unsigned long)_sinittext)) &&
24557+
24558+#ifdef CONFIG_ACPI_SLEEP
24559+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24560+#endif
24561+
24562+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24563+ return 0;
24564+ return 1;
24565 }
24566
24567 /*
24568@@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
24569 unsigned long last_map_addr = end;
24570 unsigned long start_pfn, end_pfn;
24571 pgd_t *pgd_base = swapper_pg_dir;
24572- int pgd_idx, pmd_idx, pte_ofs;
24573+ unsigned int pgd_idx, pmd_idx, pte_ofs;
24574 unsigned long pfn;
24575 pgd_t *pgd;
24576+ pud_t *pud;
24577 pmd_t *pmd;
24578 pte_t *pte;
24579 unsigned pages_2m, pages_4k;
24580@@ -281,8 +282,13 @@ repeat:
24581 pfn = start_pfn;
24582 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24583 pgd = pgd_base + pgd_idx;
24584- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24585- pmd = one_md_table_init(pgd);
24586+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24587+ pud = pud_offset(pgd, 0);
24588+ pmd = pmd_offset(pud, 0);
24589+
24590+#ifdef CONFIG_X86_PAE
24591+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24592+#endif
24593
24594 if (pfn >= end_pfn)
24595 continue;
24596@@ -294,14 +300,13 @@ repeat:
24597 #endif
24598 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24599 pmd++, pmd_idx++) {
24600- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24601+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24602
24603 /*
24604 * Map with big pages if possible, otherwise
24605 * create normal page tables:
24606 */
24607 if (use_pse) {
24608- unsigned int addr2;
24609 pgprot_t prot = PAGE_KERNEL_LARGE;
24610 /*
24611 * first pass will use the same initial
24612@@ -311,11 +316,7 @@ repeat:
24613 __pgprot(PTE_IDENT_ATTR |
24614 _PAGE_PSE);
24615
24616- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24617- PAGE_OFFSET + PAGE_SIZE-1;
24618-
24619- if (is_kernel_text(addr) ||
24620- is_kernel_text(addr2))
24621+ if (is_kernel_text(address, address + PMD_SIZE))
24622 prot = PAGE_KERNEL_LARGE_EXEC;
24623
24624 pages_2m++;
24625@@ -332,7 +333,7 @@ repeat:
24626 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24627 pte += pte_ofs;
24628 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24629- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24630+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24631 pgprot_t prot = PAGE_KERNEL;
24632 /*
24633 * first pass will use the same initial
24634@@ -340,7 +341,7 @@ repeat:
24635 */
24636 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24637
24638- if (is_kernel_text(addr))
24639+ if (is_kernel_text(address, address + PAGE_SIZE))
24640 prot = PAGE_KERNEL_EXEC;
24641
24642 pages_4k++;
24643@@ -466,7 +467,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24644
24645 pud = pud_offset(pgd, va);
24646 pmd = pmd_offset(pud, va);
24647- if (!pmd_present(*pmd))
24648+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
24649 break;
24650
24651 pte = pte_offset_kernel(pmd, va);
24652@@ -518,12 +519,10 @@ void __init early_ioremap_page_table_range_init(void)
24653
24654 static void __init pagetable_init(void)
24655 {
24656- pgd_t *pgd_base = swapper_pg_dir;
24657-
24658- permanent_kmaps_init(pgd_base);
24659+ permanent_kmaps_init(swapper_pg_dir);
24660 }
24661
24662-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24663+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24664 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24665
24666 /* user-defined highmem size */
24667@@ -735,6 +734,12 @@ void __init mem_init(void)
24668
24669 pci_iommu_alloc();
24670
24671+#ifdef CONFIG_PAX_PER_CPU_PGD
24672+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24673+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24674+ KERNEL_PGD_PTRS);
24675+#endif
24676+
24677 #ifdef CONFIG_FLATMEM
24678 BUG_ON(!mem_map);
24679 #endif
24680@@ -761,7 +766,7 @@ void __init mem_init(void)
24681 reservedpages++;
24682
24683 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24684- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24685+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24686 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24687
24688 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24689@@ -802,10 +807,10 @@ void __init mem_init(void)
24690 ((unsigned long)&__init_end -
24691 (unsigned long)&__init_begin) >> 10,
24692
24693- (unsigned long)&_etext, (unsigned long)&_edata,
24694- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24695+ (unsigned long)&_sdata, (unsigned long)&_edata,
24696+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24697
24698- (unsigned long)&_text, (unsigned long)&_etext,
24699+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24700 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24701
24702 /*
24703@@ -883,6 +888,7 @@ void set_kernel_text_rw(void)
24704 if (!kernel_set_to_readonly)
24705 return;
24706
24707+ start = ktla_ktva(start);
24708 pr_debug("Set kernel text: %lx - %lx for read write\n",
24709 start, start+size);
24710
24711@@ -897,6 +903,7 @@ void set_kernel_text_ro(void)
24712 if (!kernel_set_to_readonly)
24713 return;
24714
24715+ start = ktla_ktva(start);
24716 pr_debug("Set kernel text: %lx - %lx for read only\n",
24717 start, start+size);
24718
24719@@ -925,6 +932,7 @@ void mark_rodata_ro(void)
24720 unsigned long start = PFN_ALIGN(_text);
24721 unsigned long size = PFN_ALIGN(_etext) - start;
24722
24723+ start = ktla_ktva(start);
24724 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
24725 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
24726 size >> 10);
24727diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
24728index 436a030..b8596b9 100644
24729--- a/arch/x86/mm/init_64.c
24730+++ b/arch/x86/mm/init_64.c
24731@@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
24732 * around without checking the pgd every time.
24733 */
24734
24735-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
24736+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
24737 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24738
24739 int force_personality32;
24740@@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24741
24742 for (address = start; address <= end; address += PGDIR_SIZE) {
24743 const pgd_t *pgd_ref = pgd_offset_k(address);
24744+
24745+#ifdef CONFIG_PAX_PER_CPU_PGD
24746+ unsigned long cpu;
24747+#else
24748 struct page *page;
24749+#endif
24750
24751 if (pgd_none(*pgd_ref))
24752 continue;
24753
24754 spin_lock(&pgd_lock);
24755+
24756+#ifdef CONFIG_PAX_PER_CPU_PGD
24757+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24758+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
24759+#else
24760 list_for_each_entry(page, &pgd_list, lru) {
24761 pgd_t *pgd;
24762 spinlock_t *pgt_lock;
24763@@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24764 /* the pgt_lock only for Xen */
24765 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
24766 spin_lock(pgt_lock);
24767+#endif
24768
24769 if (pgd_none(*pgd))
24770 set_pgd(pgd, *pgd_ref);
24771@@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
24772 BUG_ON(pgd_page_vaddr(*pgd)
24773 != pgd_page_vaddr(*pgd_ref));
24774
24775+#ifndef CONFIG_PAX_PER_CPU_PGD
24776 spin_unlock(pgt_lock);
24777+#endif
24778+
24779 }
24780 spin_unlock(&pgd_lock);
24781 }
24782@@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
24783 pmd = fill_pmd(pud, vaddr);
24784 pte = fill_pte(pmd, vaddr);
24785
24786+ pax_open_kernel();
24787 set_pte(pte, new_pte);
24788+ pax_close_kernel();
24789
24790 /*
24791 * It's enough to flush this one mapping.
24792@@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
24793 pgd = pgd_offset_k((unsigned long)__va(phys));
24794 if (pgd_none(*pgd)) {
24795 pud = (pud_t *) spp_getpage();
24796- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
24797- _PAGE_USER));
24798+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
24799 }
24800 pud = pud_offset(pgd, (unsigned long)__va(phys));
24801 if (pud_none(*pud)) {
24802 pmd = (pmd_t *) spp_getpage();
24803- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
24804- _PAGE_USER));
24805+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
24806 }
24807 pmd = pmd_offset(pud, phys);
24808 BUG_ON(!pmd_none(*pmd));
24809@@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
24810 if (pfn >= pgt_buf_top)
24811 panic("alloc_low_page: ran out of memory");
24812
24813- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
24814+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
24815 clear_page(adr);
24816 *phys = pfn * PAGE_SIZE;
24817 return adr;
24818@@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
24819
24820 phys = __pa(virt);
24821 left = phys & (PAGE_SIZE - 1);
24822- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
24823+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
24824 adr = (void *)(((unsigned long)adr) | left);
24825
24826 return adr;
24827@@ -684,6 +698,12 @@ void __init mem_init(void)
24828
24829 pci_iommu_alloc();
24830
24831+#ifdef CONFIG_PAX_PER_CPU_PGD
24832+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24833+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24834+ KERNEL_PGD_PTRS);
24835+#endif
24836+
24837 /* clear_bss() already clear the empty_zero_page */
24838
24839 reservedpages = 0;
24840@@ -844,8 +864,8 @@ int kern_addr_valid(unsigned long addr)
24841 static struct vm_area_struct gate_vma = {
24842 .vm_start = VSYSCALL_START,
24843 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
24844- .vm_page_prot = PAGE_READONLY_EXEC,
24845- .vm_flags = VM_READ | VM_EXEC
24846+ .vm_page_prot = PAGE_READONLY,
24847+ .vm_flags = VM_READ
24848 };
24849
24850 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
24851@@ -879,7 +899,7 @@ int in_gate_area_no_mm(unsigned long addr)
24852
24853 const char *arch_vma_name(struct vm_area_struct *vma)
24854 {
24855- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
24856+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
24857 return "[vdso]";
24858 if (vma == &gate_vma)
24859 return "[vsyscall]";
24860diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
24861index 7b179b4..6bd1777 100644
24862--- a/arch/x86/mm/iomap_32.c
24863+++ b/arch/x86/mm/iomap_32.c
24864@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
24865 type = kmap_atomic_idx_push();
24866 idx = type + KM_TYPE_NR * smp_processor_id();
24867 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24868+
24869+ pax_open_kernel();
24870 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
24871+ pax_close_kernel();
24872+
24873 arch_flush_lazy_mmu_mode();
24874
24875 return (void *)vaddr;
24876diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
24877index be1ef57..55f0160 100644
24878--- a/arch/x86/mm/ioremap.c
24879+++ b/arch/x86/mm/ioremap.c
24880@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
24881 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
24882 int is_ram = page_is_ram(pfn);
24883
24884- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
24885+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
24886 return NULL;
24887 WARN_ON_ONCE(is_ram);
24888 }
24889@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
24890
24891 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
24892 if (page_is_ram(start >> PAGE_SHIFT))
24893+#ifdef CONFIG_HIGHMEM
24894+ if ((start >> PAGE_SHIFT) < max_low_pfn)
24895+#endif
24896 return __va(phys);
24897
24898 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
24899@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
24900 early_param("early_ioremap_debug", early_ioremap_debug_setup);
24901
24902 static __initdata int after_paging_init;
24903-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
24904+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
24905
24906 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
24907 {
24908@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
24909 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
24910
24911 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
24912- memset(bm_pte, 0, sizeof(bm_pte));
24913- pmd_populate_kernel(&init_mm, pmd, bm_pte);
24914+ pmd_populate_user(&init_mm, pmd, bm_pte);
24915
24916 /*
24917 * The boot-ioremap range spans multiple pmds, for which
24918diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
24919index d87dd6d..bf3fa66 100644
24920--- a/arch/x86/mm/kmemcheck/kmemcheck.c
24921+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
24922@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
24923 * memory (e.g. tracked pages)? For now, we need this to avoid
24924 * invoking kmemcheck for PnP BIOS calls.
24925 */
24926- if (regs->flags & X86_VM_MASK)
24927+ if (v8086_mode(regs))
24928 return false;
24929- if (regs->cs != __KERNEL_CS)
24930+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
24931 return false;
24932
24933 pte = kmemcheck_pte_lookup(address);
24934diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
24935index 845df68..1d8d29f 100644
24936--- a/arch/x86/mm/mmap.c
24937+++ b/arch/x86/mm/mmap.c
24938@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
24939 * Leave an at least ~128 MB hole with possible stack randomization.
24940 */
24941 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
24942-#define MAX_GAP (TASK_SIZE/6*5)
24943+#define MAX_GAP (pax_task_size/6*5)
24944
24945 static int mmap_is_legacy(void)
24946 {
24947@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
24948 return rnd << PAGE_SHIFT;
24949 }
24950
24951-static unsigned long mmap_base(void)
24952+static unsigned long mmap_base(struct mm_struct *mm)
24953 {
24954 unsigned long gap = rlimit(RLIMIT_STACK);
24955+ unsigned long pax_task_size = TASK_SIZE;
24956+
24957+#ifdef CONFIG_PAX_SEGMEXEC
24958+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24959+ pax_task_size = SEGMEXEC_TASK_SIZE;
24960+#endif
24961
24962 if (gap < MIN_GAP)
24963 gap = MIN_GAP;
24964 else if (gap > MAX_GAP)
24965 gap = MAX_GAP;
24966
24967- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
24968+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
24969 }
24970
24971 /*
24972 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
24973 * does, but not when emulating X86_32
24974 */
24975-static unsigned long mmap_legacy_base(void)
24976+static unsigned long mmap_legacy_base(struct mm_struct *mm)
24977 {
24978- if (mmap_is_ia32())
24979+ if (mmap_is_ia32()) {
24980+
24981+#ifdef CONFIG_PAX_SEGMEXEC
24982+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24983+ return SEGMEXEC_TASK_UNMAPPED_BASE;
24984+ else
24985+#endif
24986+
24987 return TASK_UNMAPPED_BASE;
24988- else
24989+ } else
24990 return TASK_UNMAPPED_BASE + mmap_rnd();
24991 }
24992
24993@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
24994 void arch_pick_mmap_layout(struct mm_struct *mm)
24995 {
24996 if (mmap_is_legacy()) {
24997- mm->mmap_base = mmap_legacy_base();
24998+ mm->mmap_base = mmap_legacy_base(mm);
24999+
25000+#ifdef CONFIG_PAX_RANDMMAP
25001+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25002+ mm->mmap_base += mm->delta_mmap;
25003+#endif
25004+
25005 mm->get_unmapped_area = arch_get_unmapped_area;
25006 mm->unmap_area = arch_unmap_area;
25007 } else {
25008- mm->mmap_base = mmap_base();
25009+ mm->mmap_base = mmap_base(mm);
25010+
25011+#ifdef CONFIG_PAX_RANDMMAP
25012+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25013+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25014+#endif
25015+
25016 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25017 mm->unmap_area = arch_unmap_area_topdown;
25018 }
25019diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25020index dc0b727..dc9d71a 100644
25021--- a/arch/x86/mm/mmio-mod.c
25022+++ b/arch/x86/mm/mmio-mod.c
25023@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25024 break;
25025 default:
25026 {
25027- unsigned char *ip = (unsigned char *)instptr;
25028+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25029 my_trace->opcode = MMIO_UNKNOWN_OP;
25030 my_trace->width = 0;
25031 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25032@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25033 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25034 void __iomem *addr)
25035 {
25036- static atomic_t next_id;
25037+ static atomic_unchecked_t next_id;
25038 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25039 /* These are page-unaligned. */
25040 struct mmiotrace_map map = {
25041@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25042 .private = trace
25043 },
25044 .phys = offset,
25045- .id = atomic_inc_return(&next_id)
25046+ .id = atomic_inc_return_unchecked(&next_id)
25047 };
25048 map.map_id = trace->id;
25049
25050diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25051index b008656..773eac2 100644
25052--- a/arch/x86/mm/pageattr-test.c
25053+++ b/arch/x86/mm/pageattr-test.c
25054@@ -36,7 +36,7 @@ enum {
25055
25056 static int pte_testbit(pte_t pte)
25057 {
25058- return pte_flags(pte) & _PAGE_UNUSED1;
25059+ return pte_flags(pte) & _PAGE_CPA_TEST;
25060 }
25061
25062 struct split_state {
25063diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25064index e1ebde3..b1e1db38 100644
25065--- a/arch/x86/mm/pageattr.c
25066+++ b/arch/x86/mm/pageattr.c
25067@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25068 */
25069 #ifdef CONFIG_PCI_BIOS
25070 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25071- pgprot_val(forbidden) |= _PAGE_NX;
25072+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25073 #endif
25074
25075 /*
25076@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25077 * Does not cover __inittext since that is gone later on. On
25078 * 64bit we do not enforce !NX on the low mapping
25079 */
25080- if (within(address, (unsigned long)_text, (unsigned long)_etext))
25081- pgprot_val(forbidden) |= _PAGE_NX;
25082+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25083+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25084
25085+#ifdef CONFIG_DEBUG_RODATA
25086 /*
25087 * The .rodata section needs to be read-only. Using the pfn
25088 * catches all aliases.
25089@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25090 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25091 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25092 pgprot_val(forbidden) |= _PAGE_RW;
25093+#endif
25094
25095 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25096 /*
25097@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25098 }
25099 #endif
25100
25101+#ifdef CONFIG_PAX_KERNEXEC
25102+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25103+ pgprot_val(forbidden) |= _PAGE_RW;
25104+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25105+ }
25106+#endif
25107+
25108 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25109
25110 return prot;
25111@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25112 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25113 {
25114 /* change init_mm */
25115+ pax_open_kernel();
25116 set_pte_atomic(kpte, pte);
25117+
25118 #ifdef CONFIG_X86_32
25119 if (!SHARED_KERNEL_PMD) {
25120+
25121+#ifdef CONFIG_PAX_PER_CPU_PGD
25122+ unsigned long cpu;
25123+#else
25124 struct page *page;
25125+#endif
25126
25127+#ifdef CONFIG_PAX_PER_CPU_PGD
25128+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25129+ pgd_t *pgd = get_cpu_pgd(cpu);
25130+#else
25131 list_for_each_entry(page, &pgd_list, lru) {
25132- pgd_t *pgd;
25133+ pgd_t *pgd = (pgd_t *)page_address(page);
25134+#endif
25135+
25136 pud_t *pud;
25137 pmd_t *pmd;
25138
25139- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25140+ pgd += pgd_index(address);
25141 pud = pud_offset(pgd, address);
25142 pmd = pmd_offset(pud, address);
25143 set_pte_atomic((pte_t *)pmd, pte);
25144 }
25145 }
25146 #endif
25147+ pax_close_kernel();
25148 }
25149
25150 static int
25151diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25152index f6ff57b..481690f 100644
25153--- a/arch/x86/mm/pat.c
25154+++ b/arch/x86/mm/pat.c
25155@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25156
25157 if (!entry) {
25158 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25159- current->comm, current->pid, start, end);
25160+ current->comm, task_pid_nr(current), start, end);
25161 return -EINVAL;
25162 }
25163
25164@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25165 while (cursor < to) {
25166 if (!devmem_is_allowed(pfn)) {
25167 printk(KERN_INFO
25168- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25169- current->comm, from, to);
25170+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25171+ current->comm, from, to, cursor);
25172 return 0;
25173 }
25174 cursor += PAGE_SIZE;
25175@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25176 printk(KERN_INFO
25177 "%s:%d ioremap_change_attr failed %s "
25178 "for %Lx-%Lx\n",
25179- current->comm, current->pid,
25180+ current->comm, task_pid_nr(current),
25181 cattr_name(flags),
25182 base, (unsigned long long)(base + size));
25183 return -EINVAL;
25184@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25185 if (want_flags != flags) {
25186 printk(KERN_WARNING
25187 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25188- current->comm, current->pid,
25189+ current->comm, task_pid_nr(current),
25190 cattr_name(want_flags),
25191 (unsigned long long)paddr,
25192 (unsigned long long)(paddr + size),
25193@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25194 free_memtype(paddr, paddr + size);
25195 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25196 " for %Lx-%Lx, got %s\n",
25197- current->comm, current->pid,
25198+ current->comm, task_pid_nr(current),
25199 cattr_name(want_flags),
25200 (unsigned long long)paddr,
25201 (unsigned long long)(paddr + size),
25202diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25203index 9f0614d..92ae64a 100644
25204--- a/arch/x86/mm/pf_in.c
25205+++ b/arch/x86/mm/pf_in.c
25206@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25207 int i;
25208 enum reason_type rv = OTHERS;
25209
25210- p = (unsigned char *)ins_addr;
25211+ p = (unsigned char *)ktla_ktva(ins_addr);
25212 p += skip_prefix(p, &prf);
25213 p += get_opcode(p, &opcode);
25214
25215@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25216 struct prefix_bits prf;
25217 int i;
25218
25219- p = (unsigned char *)ins_addr;
25220+ p = (unsigned char *)ktla_ktva(ins_addr);
25221 p += skip_prefix(p, &prf);
25222 p += get_opcode(p, &opcode);
25223
25224@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25225 struct prefix_bits prf;
25226 int i;
25227
25228- p = (unsigned char *)ins_addr;
25229+ p = (unsigned char *)ktla_ktva(ins_addr);
25230 p += skip_prefix(p, &prf);
25231 p += get_opcode(p, &opcode);
25232
25233@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25234 struct prefix_bits prf;
25235 int i;
25236
25237- p = (unsigned char *)ins_addr;
25238+ p = (unsigned char *)ktla_ktva(ins_addr);
25239 p += skip_prefix(p, &prf);
25240 p += get_opcode(p, &opcode);
25241 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25242@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25243 struct prefix_bits prf;
25244 int i;
25245
25246- p = (unsigned char *)ins_addr;
25247+ p = (unsigned char *)ktla_ktva(ins_addr);
25248 p += skip_prefix(p, &prf);
25249 p += get_opcode(p, &opcode);
25250 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25251diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25252index 8573b83..01e9be7 100644
25253--- a/arch/x86/mm/pgtable.c
25254+++ b/arch/x86/mm/pgtable.c
25255@@ -84,10 +84,56 @@ static inline void pgd_list_del(pgd_t *pgd)
25256 list_del(&page->lru);
25257 }
25258
25259-#define UNSHARED_PTRS_PER_PGD \
25260- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25261+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25262+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25263
25264+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25265+{
25266+ while (count--)
25267+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25268+}
25269+#endif
25270
25271+#ifdef CONFIG_PAX_PER_CPU_PGD
25272+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25273+{
25274+ while (count--) {
25275+ pgd_t pgd;
25276+
25277+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25278+
25279+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25280+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25281+#endif
25282+
25283+ *dst++ = pgd;
25284+ }
25285+
25286+}
25287+#endif
25288+
25289+#ifdef CONFIG_X86_64
25290+#define pxd_t pud_t
25291+#define pyd_t pgd_t
25292+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25293+#define pxd_free(mm, pud) pud_free((mm), (pud))
25294+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25295+#define pyd_offset(mm, address) pgd_offset((mm), (address))
25296+#define PYD_SIZE PGDIR_SIZE
25297+#else
25298+#define pxd_t pmd_t
25299+#define pyd_t pud_t
25300+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25301+#define pxd_free(mm, pud) pmd_free((mm), (pud))
25302+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25303+#define pyd_offset(mm, address) pud_offset((mm), (address))
25304+#define PYD_SIZE PUD_SIZE
25305+#endif
25306+
25307+#ifdef CONFIG_PAX_PER_CPU_PGD
25308+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25309+static inline void pgd_dtor(pgd_t *pgd) {}
25310+#else
25311 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25312 {
25313 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25314@@ -128,6 +174,7 @@ static void pgd_dtor(pgd_t *pgd)
25315 pgd_list_del(pgd);
25316 spin_unlock(&pgd_lock);
25317 }
25318+#endif
25319
25320 /*
25321 * List of all pgd's needed for non-PAE so it can invalidate entries
25322@@ -140,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
25323 * -- wli
25324 */
25325
25326-#ifdef CONFIG_X86_PAE
25327+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25328 /*
25329 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25330 * updating the top-level pagetable entries to guarantee the
25331@@ -152,7 +199,7 @@ static void pgd_dtor(pgd_t *pgd)
25332 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25333 * and initialize the kernel pmds here.
25334 */
25335-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25336+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25337
25338 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25339 {
25340@@ -170,36 +217,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25341 */
25342 flush_tlb_mm(mm);
25343 }
25344+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25345+#define PREALLOCATED_PXDS USER_PGD_PTRS
25346 #else /* !CONFIG_X86_PAE */
25347
25348 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25349-#define PREALLOCATED_PMDS 0
25350+#define PREALLOCATED_PXDS 0
25351
25352 #endif /* CONFIG_X86_PAE */
25353
25354-static void free_pmds(pmd_t *pmds[])
25355+static void free_pxds(pxd_t *pxds[])
25356 {
25357 int i;
25358
25359- for(i = 0; i < PREALLOCATED_PMDS; i++)
25360- if (pmds[i])
25361- free_page((unsigned long)pmds[i]);
25362+ for(i = 0; i < PREALLOCATED_PXDS; i++)
25363+ if (pxds[i])
25364+ free_page((unsigned long)pxds[i]);
25365 }
25366
25367-static int preallocate_pmds(pmd_t *pmds[])
25368+static int preallocate_pxds(pxd_t *pxds[])
25369 {
25370 int i;
25371 bool failed = false;
25372
25373- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25374- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25375- if (pmd == NULL)
25376+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25377+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25378+ if (pxd == NULL)
25379 failed = true;
25380- pmds[i] = pmd;
25381+ pxds[i] = pxd;
25382 }
25383
25384 if (failed) {
25385- free_pmds(pmds);
25386+ free_pxds(pxds);
25387 return -ENOMEM;
25388 }
25389
25390@@ -212,51 +261,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25391 * preallocate which never got a corresponding vma will need to be
25392 * freed manually.
25393 */
25394-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25395+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25396 {
25397 int i;
25398
25399- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25400+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25401 pgd_t pgd = pgdp[i];
25402
25403 if (pgd_val(pgd) != 0) {
25404- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25405+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25406
25407- pgdp[i] = native_make_pgd(0);
25408+ set_pgd(pgdp + i, native_make_pgd(0));
25409
25410- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25411- pmd_free(mm, pmd);
25412+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25413+ pxd_free(mm, pxd);
25414 }
25415 }
25416 }
25417
25418-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25419+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25420 {
25421- pud_t *pud;
25422+ pyd_t *pyd;
25423 unsigned long addr;
25424 int i;
25425
25426- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25427+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25428 return;
25429
25430- pud = pud_offset(pgd, 0);
25431+#ifdef CONFIG_X86_64
25432+ pyd = pyd_offset(mm, 0L);
25433+#else
25434+ pyd = pyd_offset(pgd, 0L);
25435+#endif
25436
25437- for (addr = i = 0; i < PREALLOCATED_PMDS;
25438- i++, pud++, addr += PUD_SIZE) {
25439- pmd_t *pmd = pmds[i];
25440+ for (addr = i = 0; i < PREALLOCATED_PXDS;
25441+ i++, pyd++, addr += PYD_SIZE) {
25442+ pxd_t *pxd = pxds[i];
25443
25444 if (i >= KERNEL_PGD_BOUNDARY)
25445- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25446- sizeof(pmd_t) * PTRS_PER_PMD);
25447+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25448+ sizeof(pxd_t) * PTRS_PER_PMD);
25449
25450- pud_populate(mm, pud, pmd);
25451+ pyd_populate(mm, pyd, pxd);
25452 }
25453 }
25454
25455 pgd_t *pgd_alloc(struct mm_struct *mm)
25456 {
25457 pgd_t *pgd;
25458- pmd_t *pmds[PREALLOCATED_PMDS];
25459+ pxd_t *pxds[PREALLOCATED_PXDS];
25460
25461 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25462
25463@@ -265,11 +318,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25464
25465 mm->pgd = pgd;
25466
25467- if (preallocate_pmds(pmds) != 0)
25468+ if (preallocate_pxds(pxds) != 0)
25469 goto out_free_pgd;
25470
25471 if (paravirt_pgd_alloc(mm) != 0)
25472- goto out_free_pmds;
25473+ goto out_free_pxds;
25474
25475 /*
25476 * Make sure that pre-populating the pmds is atomic with
25477@@ -279,14 +332,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25478 spin_lock(&pgd_lock);
25479
25480 pgd_ctor(mm, pgd);
25481- pgd_prepopulate_pmd(mm, pgd, pmds);
25482+ pgd_prepopulate_pxd(mm, pgd, pxds);
25483
25484 spin_unlock(&pgd_lock);
25485
25486 return pgd;
25487
25488-out_free_pmds:
25489- free_pmds(pmds);
25490+out_free_pxds:
25491+ free_pxds(pxds);
25492 out_free_pgd:
25493 free_page((unsigned long)pgd);
25494 out:
25495@@ -295,7 +348,7 @@ out:
25496
25497 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25498 {
25499- pgd_mop_up_pmds(mm, pgd);
25500+ pgd_mop_up_pxds(mm, pgd);
25501 pgd_dtor(pgd);
25502 paravirt_pgd_free(mm, pgd);
25503 free_page((unsigned long)pgd);
25504diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25505index cac7184..09a39fa 100644
25506--- a/arch/x86/mm/pgtable_32.c
25507+++ b/arch/x86/mm/pgtable_32.c
25508@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25509 return;
25510 }
25511 pte = pte_offset_kernel(pmd, vaddr);
25512+
25513+ pax_open_kernel();
25514 if (pte_val(pteval))
25515 set_pte_at(&init_mm, vaddr, pte, pteval);
25516 else
25517 pte_clear(&init_mm, vaddr, pte);
25518+ pax_close_kernel();
25519
25520 /*
25521 * It's enough to flush this one mapping.
25522diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25523index 410531d..0f16030 100644
25524--- a/arch/x86/mm/setup_nx.c
25525+++ b/arch/x86/mm/setup_nx.c
25526@@ -5,8 +5,10 @@
25527 #include <asm/pgtable.h>
25528 #include <asm/proto.h>
25529
25530+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25531 static int disable_nx __cpuinitdata;
25532
25533+#ifndef CONFIG_PAX_PAGEEXEC
25534 /*
25535 * noexec = on|off
25536 *
25537@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25538 return 0;
25539 }
25540 early_param("noexec", noexec_setup);
25541+#endif
25542+
25543+#endif
25544
25545 void __cpuinit x86_configure_nx(void)
25546 {
25547+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25548 if (cpu_has_nx && !disable_nx)
25549 __supported_pte_mask |= _PAGE_NX;
25550 else
25551+#endif
25552 __supported_pte_mask &= ~_PAGE_NX;
25553 }
25554
25555diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25556index d6c0418..06a0ad5 100644
25557--- a/arch/x86/mm/tlb.c
25558+++ b/arch/x86/mm/tlb.c
25559@@ -65,7 +65,11 @@ void leave_mm(int cpu)
25560 BUG();
25561 cpumask_clear_cpu(cpu,
25562 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25563+
25564+#ifndef CONFIG_PAX_PER_CPU_PGD
25565 load_cr3(swapper_pg_dir);
25566+#endif
25567+
25568 }
25569 EXPORT_SYMBOL_GPL(leave_mm);
25570
25571diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25572index 6687022..ceabcfa 100644
25573--- a/arch/x86/net/bpf_jit.S
25574+++ b/arch/x86/net/bpf_jit.S
25575@@ -9,6 +9,7 @@
25576 */
25577 #include <linux/linkage.h>
25578 #include <asm/dwarf2.h>
25579+#include <asm/alternative-asm.h>
25580
25581 /*
25582 * Calling convention :
25583@@ -35,6 +36,7 @@ sk_load_word:
25584 jle bpf_slow_path_word
25585 mov (SKBDATA,%rsi),%eax
25586 bswap %eax /* ntohl() */
25587+ pax_force_retaddr
25588 ret
25589
25590
25591@@ -53,6 +55,7 @@ sk_load_half:
25592 jle bpf_slow_path_half
25593 movzwl (SKBDATA,%rsi),%eax
25594 rol $8,%ax # ntohs()
25595+ pax_force_retaddr
25596 ret
25597
25598 sk_load_byte_ind:
25599@@ -66,6 +69,7 @@ sk_load_byte:
25600 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25601 jle bpf_slow_path_byte
25602 movzbl (SKBDATA,%rsi),%eax
25603+ pax_force_retaddr
25604 ret
25605
25606 /**
25607@@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
25608 movzbl (SKBDATA,%rsi),%ebx
25609 and $15,%bl
25610 shl $2,%bl
25611+ pax_force_retaddr
25612 ret
25613 CFI_ENDPROC
25614 ENDPROC(sk_load_byte_msh)
25615@@ -91,6 +96,7 @@ bpf_error:
25616 xor %eax,%eax
25617 mov -8(%rbp),%rbx
25618 leaveq
25619+ pax_force_retaddr
25620 ret
25621
25622 /* rsi contains offset and can be scratched */
25623@@ -113,6 +119,7 @@ bpf_slow_path_word:
25624 js bpf_error
25625 mov -12(%rbp),%eax
25626 bswap %eax
25627+ pax_force_retaddr
25628 ret
25629
25630 bpf_slow_path_half:
25631@@ -121,12 +128,14 @@ bpf_slow_path_half:
25632 mov -12(%rbp),%ax
25633 rol $8,%ax
25634 movzwl %ax,%eax
25635+ pax_force_retaddr
25636 ret
25637
25638 bpf_slow_path_byte:
25639 bpf_slow_path_common(1)
25640 js bpf_error
25641 movzbl -12(%rbp),%eax
25642+ pax_force_retaddr
25643 ret
25644
25645 bpf_slow_path_byte_msh:
25646@@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
25647 and $15,%al
25648 shl $2,%al
25649 xchg %eax,%ebx
25650+ pax_force_retaddr
25651 ret
25652diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
25653index 5671752..6838cd1 100644
25654--- a/arch/x86/net/bpf_jit_comp.c
25655+++ b/arch/x86/net/bpf_jit_comp.c
25656@@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
25657 set_fs(old_fs);
25658 }
25659
25660+struct bpf_jit_work {
25661+ struct work_struct work;
25662+ void *image;
25663+};
25664
25665 void bpf_jit_compile(struct sk_filter *fp)
25666 {
25667@@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
25668 if (addrs == NULL)
25669 return;
25670
25671+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
25672+ if (!fp->work)
25673+ goto out;
25674+
25675 /* Before first pass, make a rough estimation of addrs[]
25676 * each bpf instruction is translated to less than 64 bytes
25677 */
25678@@ -477,7 +485,7 @@ void bpf_jit_compile(struct sk_filter *fp)
25679 common_load: seen |= SEEN_DATAREF;
25680 if ((int)K < 0) {
25681 /* Abort the JIT because __load_pointer() is needed. */
25682- goto out;
25683+ goto error;
25684 }
25685 t_offset = func - (image + addrs[i]);
25686 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
25687@@ -492,7 +500,7 @@ common_load: seen |= SEEN_DATAREF;
25688 case BPF_S_LDX_B_MSH:
25689 if ((int)K < 0) {
25690 /* Abort the JIT because __load_pointer() is needed. */
25691- goto out;
25692+ goto error;
25693 }
25694 seen |= SEEN_DATAREF | SEEN_XREG;
25695 t_offset = sk_load_byte_msh - (image + addrs[i]);
25696@@ -582,17 +590,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25697 break;
25698 default:
25699 /* hmm, too complex filter, give up with jit compiler */
25700- goto out;
25701+ goto error;
25702 }
25703 ilen = prog - temp;
25704 if (image) {
25705 if (unlikely(proglen + ilen > oldproglen)) {
25706 pr_err("bpb_jit_compile fatal error\n");
25707- kfree(addrs);
25708- module_free(NULL, image);
25709- return;
25710+ module_free_exec(NULL, image);
25711+ goto error;
25712 }
25713+ pax_open_kernel();
25714 memcpy(image + proglen, temp, ilen);
25715+ pax_close_kernel();
25716 }
25717 proglen += ilen;
25718 addrs[i] = proglen;
25719@@ -613,11 +622,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25720 break;
25721 }
25722 if (proglen == oldproglen) {
25723- image = module_alloc(max_t(unsigned int,
25724- proglen,
25725- sizeof(struct work_struct)));
25726+ image = module_alloc_exec(proglen);
25727 if (!image)
25728- goto out;
25729+ goto error;
25730 }
25731 oldproglen = proglen;
25732 }
25733@@ -633,7 +640,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
25734 bpf_flush_icache(image, image + proglen);
25735
25736 fp->bpf_func = (void *)image;
25737- }
25738+ } else
25739+error:
25740+ kfree(fp->work);
25741+
25742 out:
25743 kfree(addrs);
25744 return;
25745@@ -641,18 +651,20 @@ out:
25746
25747 static void jit_free_defer(struct work_struct *arg)
25748 {
25749- module_free(NULL, arg);
25750+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
25751+ kfree(arg);
25752 }
25753
25754 /* run from softirq, we must use a work_struct to call
25755- * module_free() from process context
25756+ * module_free_exec() from process context
25757 */
25758 void bpf_jit_free(struct sk_filter *fp)
25759 {
25760 if (fp->bpf_func != sk_run_filter) {
25761- struct work_struct *work = (struct work_struct *)fp->bpf_func;
25762+ struct work_struct *work = &fp->work->work;
25763
25764 INIT_WORK(work, jit_free_defer);
25765+ fp->work->image = fp->bpf_func;
25766 schedule_work(work);
25767 }
25768 }
25769diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
25770index bff89df..377758a 100644
25771--- a/arch/x86/oprofile/backtrace.c
25772+++ b/arch/x86/oprofile/backtrace.c
25773@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
25774 struct stack_frame_ia32 *fp;
25775 unsigned long bytes;
25776
25777- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
25778+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
25779 if (bytes != sizeof(bufhead))
25780 return NULL;
25781
25782- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
25783+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
25784
25785 oprofile_add_trace(bufhead[0].return_address);
25786
25787@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
25788 struct stack_frame bufhead[2];
25789 unsigned long bytes;
25790
25791- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
25792+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
25793 if (bytes != sizeof(bufhead))
25794 return NULL;
25795
25796@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
25797 {
25798 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
25799
25800- if (!user_mode_vm(regs)) {
25801+ if (!user_mode(regs)) {
25802 unsigned long stack = kernel_stack_pointer(regs);
25803 if (depth)
25804 dump_trace(NULL, regs, (unsigned long *)stack, 0,
25805diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
25806index cb29191..036766d 100644
25807--- a/arch/x86/pci/mrst.c
25808+++ b/arch/x86/pci/mrst.c
25809@@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
25810 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
25811 pci_mmcfg_late_init();
25812 pcibios_enable_irq = mrst_pci_irq_enable;
25813- pci_root_ops = pci_mrst_ops;
25814+ pax_open_kernel();
25815+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
25816+ pax_close_kernel();
25817 /* Continue with standard init */
25818 return 1;
25819 }
25820diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
25821index da8fe05..7ee6704 100644
25822--- a/arch/x86/pci/pcbios.c
25823+++ b/arch/x86/pci/pcbios.c
25824@@ -79,50 +79,93 @@ union bios32 {
25825 static struct {
25826 unsigned long address;
25827 unsigned short segment;
25828-} bios32_indirect = { 0, __KERNEL_CS };
25829+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
25830
25831 /*
25832 * Returns the entry point for the given service, NULL on error
25833 */
25834
25835-static unsigned long bios32_service(unsigned long service)
25836+static unsigned long __devinit bios32_service(unsigned long service)
25837 {
25838 unsigned char return_code; /* %al */
25839 unsigned long address; /* %ebx */
25840 unsigned long length; /* %ecx */
25841 unsigned long entry; /* %edx */
25842 unsigned long flags;
25843+ struct desc_struct d, *gdt;
25844
25845 local_irq_save(flags);
25846- __asm__("lcall *(%%edi); cld"
25847+
25848+ gdt = get_cpu_gdt_table(smp_processor_id());
25849+
25850+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
25851+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
25852+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
25853+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
25854+
25855+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
25856 : "=a" (return_code),
25857 "=b" (address),
25858 "=c" (length),
25859 "=d" (entry)
25860 : "0" (service),
25861 "1" (0),
25862- "D" (&bios32_indirect));
25863+ "D" (&bios32_indirect),
25864+ "r"(__PCIBIOS_DS)
25865+ : "memory");
25866+
25867+ pax_open_kernel();
25868+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
25869+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
25870+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
25871+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
25872+ pax_close_kernel();
25873+
25874 local_irq_restore(flags);
25875
25876 switch (return_code) {
25877- case 0:
25878- return address + entry;
25879- case 0x80: /* Not present */
25880- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
25881- return 0;
25882- default: /* Shouldn't happen */
25883- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
25884- service, return_code);
25885+ case 0: {
25886+ int cpu;
25887+ unsigned char flags;
25888+
25889+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
25890+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
25891+ printk(KERN_WARNING "bios32_service: not valid\n");
25892 return 0;
25893+ }
25894+ address = address + PAGE_OFFSET;
25895+ length += 16UL; /* some BIOSs underreport this... */
25896+ flags = 4;
25897+ if (length >= 64*1024*1024) {
25898+ length >>= PAGE_SHIFT;
25899+ flags |= 8;
25900+ }
25901+
25902+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25903+ gdt = get_cpu_gdt_table(cpu);
25904+ pack_descriptor(&d, address, length, 0x9b, flags);
25905+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
25906+ pack_descriptor(&d, address, length, 0x93, flags);
25907+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
25908+ }
25909+ return entry;
25910+ }
25911+ case 0x80: /* Not present */
25912+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
25913+ return 0;
25914+ default: /* Shouldn't happen */
25915+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
25916+ service, return_code);
25917+ return 0;
25918 }
25919 }
25920
25921 static struct {
25922 unsigned long address;
25923 unsigned short segment;
25924-} pci_indirect = { 0, __KERNEL_CS };
25925+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
25926
25927-static int pci_bios_present;
25928+static int pci_bios_present __read_only;
25929
25930 static int __devinit check_pcibios(void)
25931 {
25932@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
25933 unsigned long flags, pcibios_entry;
25934
25935 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
25936- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
25937+ pci_indirect.address = pcibios_entry;
25938
25939 local_irq_save(flags);
25940- __asm__(
25941- "lcall *(%%edi); cld\n\t"
25942+ __asm__("movw %w6, %%ds\n\t"
25943+ "lcall *%%ss:(%%edi); cld\n\t"
25944+ "push %%ss\n\t"
25945+ "pop %%ds\n\t"
25946 "jc 1f\n\t"
25947 "xor %%ah, %%ah\n"
25948 "1:"
25949@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
25950 "=b" (ebx),
25951 "=c" (ecx)
25952 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
25953- "D" (&pci_indirect)
25954+ "D" (&pci_indirect),
25955+ "r" (__PCIBIOS_DS)
25956 : "memory");
25957 local_irq_restore(flags);
25958
25959@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25960
25961 switch (len) {
25962 case 1:
25963- __asm__("lcall *(%%esi); cld\n\t"
25964+ __asm__("movw %w6, %%ds\n\t"
25965+ "lcall *%%ss:(%%esi); cld\n\t"
25966+ "push %%ss\n\t"
25967+ "pop %%ds\n\t"
25968 "jc 1f\n\t"
25969 "xor %%ah, %%ah\n"
25970 "1:"
25971@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25972 : "1" (PCIBIOS_READ_CONFIG_BYTE),
25973 "b" (bx),
25974 "D" ((long)reg),
25975- "S" (&pci_indirect));
25976+ "S" (&pci_indirect),
25977+ "r" (__PCIBIOS_DS));
25978 /*
25979 * Zero-extend the result beyond 8 bits, do not trust the
25980 * BIOS having done it:
25981@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25982 *value &= 0xff;
25983 break;
25984 case 2:
25985- __asm__("lcall *(%%esi); cld\n\t"
25986+ __asm__("movw %w6, %%ds\n\t"
25987+ "lcall *%%ss:(%%esi); cld\n\t"
25988+ "push %%ss\n\t"
25989+ "pop %%ds\n\t"
25990 "jc 1f\n\t"
25991 "xor %%ah, %%ah\n"
25992 "1:"
25993@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
25994 : "1" (PCIBIOS_READ_CONFIG_WORD),
25995 "b" (bx),
25996 "D" ((long)reg),
25997- "S" (&pci_indirect));
25998+ "S" (&pci_indirect),
25999+ "r" (__PCIBIOS_DS));
26000 /*
26001 * Zero-extend the result beyond 16 bits, do not trust the
26002 * BIOS having done it:
26003@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26004 *value &= 0xffff;
26005 break;
26006 case 4:
26007- __asm__("lcall *(%%esi); cld\n\t"
26008+ __asm__("movw %w6, %%ds\n\t"
26009+ "lcall *%%ss:(%%esi); cld\n\t"
26010+ "push %%ss\n\t"
26011+ "pop %%ds\n\t"
26012 "jc 1f\n\t"
26013 "xor %%ah, %%ah\n"
26014 "1:"
26015@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26016 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26017 "b" (bx),
26018 "D" ((long)reg),
26019- "S" (&pci_indirect));
26020+ "S" (&pci_indirect),
26021+ "r" (__PCIBIOS_DS));
26022 break;
26023 }
26024
26025@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26026
26027 switch (len) {
26028 case 1:
26029- __asm__("lcall *(%%esi); cld\n\t"
26030+ __asm__("movw %w6, %%ds\n\t"
26031+ "lcall *%%ss:(%%esi); cld\n\t"
26032+ "push %%ss\n\t"
26033+ "pop %%ds\n\t"
26034 "jc 1f\n\t"
26035 "xor %%ah, %%ah\n"
26036 "1:"
26037@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26038 "c" (value),
26039 "b" (bx),
26040 "D" ((long)reg),
26041- "S" (&pci_indirect));
26042+ "S" (&pci_indirect),
26043+ "r" (__PCIBIOS_DS));
26044 break;
26045 case 2:
26046- __asm__("lcall *(%%esi); cld\n\t"
26047+ __asm__("movw %w6, %%ds\n\t"
26048+ "lcall *%%ss:(%%esi); cld\n\t"
26049+ "push %%ss\n\t"
26050+ "pop %%ds\n\t"
26051 "jc 1f\n\t"
26052 "xor %%ah, %%ah\n"
26053 "1:"
26054@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26055 "c" (value),
26056 "b" (bx),
26057 "D" ((long)reg),
26058- "S" (&pci_indirect));
26059+ "S" (&pci_indirect),
26060+ "r" (__PCIBIOS_DS));
26061 break;
26062 case 4:
26063- __asm__("lcall *(%%esi); cld\n\t"
26064+ __asm__("movw %w6, %%ds\n\t"
26065+ "lcall *%%ss:(%%esi); cld\n\t"
26066+ "push %%ss\n\t"
26067+ "pop %%ds\n\t"
26068 "jc 1f\n\t"
26069 "xor %%ah, %%ah\n"
26070 "1:"
26071@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26072 "c" (value),
26073 "b" (bx),
26074 "D" ((long)reg),
26075- "S" (&pci_indirect));
26076+ "S" (&pci_indirect),
26077+ "r" (__PCIBIOS_DS));
26078 break;
26079 }
26080
26081@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26082
26083 DBG("PCI: Fetching IRQ routing table... ");
26084 __asm__("push %%es\n\t"
26085+ "movw %w8, %%ds\n\t"
26086 "push %%ds\n\t"
26087 "pop %%es\n\t"
26088- "lcall *(%%esi); cld\n\t"
26089+ "lcall *%%ss:(%%esi); cld\n\t"
26090 "pop %%es\n\t"
26091+ "push %%ss\n\t"
26092+ "pop %%ds\n"
26093 "jc 1f\n\t"
26094 "xor %%ah, %%ah\n"
26095 "1:"
26096@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26097 "1" (0),
26098 "D" ((long) &opt),
26099 "S" (&pci_indirect),
26100- "m" (opt)
26101+ "m" (opt),
26102+ "r" (__PCIBIOS_DS)
26103 : "memory");
26104 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26105 if (ret & 0xff00)
26106@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26107 {
26108 int ret;
26109
26110- __asm__("lcall *(%%esi); cld\n\t"
26111+ __asm__("movw %w5, %%ds\n\t"
26112+ "lcall *%%ss:(%%esi); cld\n\t"
26113+ "push %%ss\n\t"
26114+ "pop %%ds\n"
26115 "jc 1f\n\t"
26116 "xor %%ah, %%ah\n"
26117 "1:"
26118@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26119 : "0" (PCIBIOS_SET_PCI_HW_INT),
26120 "b" ((dev->bus->number << 8) | dev->devfn),
26121 "c" ((irq << 8) | (pin + 10)),
26122- "S" (&pci_indirect));
26123+ "S" (&pci_indirect),
26124+ "r" (__PCIBIOS_DS));
26125 return !(ret & 0xff00);
26126 }
26127 EXPORT_SYMBOL(pcibios_set_irq_routing);
26128diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26129index 40e4469..1ab536e 100644
26130--- a/arch/x86/platform/efi/efi_32.c
26131+++ b/arch/x86/platform/efi/efi_32.c
26132@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26133 {
26134 struct desc_ptr gdt_descr;
26135
26136+#ifdef CONFIG_PAX_KERNEXEC
26137+ struct desc_struct d;
26138+#endif
26139+
26140 local_irq_save(efi_rt_eflags);
26141
26142 load_cr3(initial_page_table);
26143 __flush_tlb_all();
26144
26145+#ifdef CONFIG_PAX_KERNEXEC
26146+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26147+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26148+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26149+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26150+#endif
26151+
26152 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26153 gdt_descr.size = GDT_SIZE - 1;
26154 load_gdt(&gdt_descr);
26155@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26156 {
26157 struct desc_ptr gdt_descr;
26158
26159+#ifdef CONFIG_PAX_KERNEXEC
26160+ struct desc_struct d;
26161+
26162+ memset(&d, 0, sizeof d);
26163+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26164+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26165+#endif
26166+
26167 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26168 gdt_descr.size = GDT_SIZE - 1;
26169 load_gdt(&gdt_descr);
26170diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26171index fbe66e6..c5c0dd2 100644
26172--- a/arch/x86/platform/efi/efi_stub_32.S
26173+++ b/arch/x86/platform/efi/efi_stub_32.S
26174@@ -6,7 +6,9 @@
26175 */
26176
26177 #include <linux/linkage.h>
26178+#include <linux/init.h>
26179 #include <asm/page_types.h>
26180+#include <asm/segment.h>
26181
26182 /*
26183 * efi_call_phys(void *, ...) is a function with variable parameters.
26184@@ -20,7 +22,7 @@
26185 * service functions will comply with gcc calling convention, too.
26186 */
26187
26188-.text
26189+__INIT
26190 ENTRY(efi_call_phys)
26191 /*
26192 * 0. The function can only be called in Linux kernel. So CS has been
26193@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
26194 * The mapping of lower virtual memory has been created in prelog and
26195 * epilog.
26196 */
26197- movl $1f, %edx
26198- subl $__PAGE_OFFSET, %edx
26199- jmp *%edx
26200+ movl $(__KERNEXEC_EFI_DS), %edx
26201+ mov %edx, %ds
26202+ mov %edx, %es
26203+ mov %edx, %ss
26204+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
26205 1:
26206
26207 /*
26208@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
26209 * parameter 2, ..., param n. To make things easy, we save the return
26210 * address of efi_call_phys in a global variable.
26211 */
26212- popl %edx
26213- movl %edx, saved_return_addr
26214- /* get the function pointer into ECX*/
26215- popl %ecx
26216- movl %ecx, efi_rt_function_ptr
26217- movl $2f, %edx
26218- subl $__PAGE_OFFSET, %edx
26219- pushl %edx
26220+ popl (saved_return_addr)
26221+ popl (efi_rt_function_ptr)
26222
26223 /*
26224 * 3. Clear PG bit in %CR0.
26225@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
26226 /*
26227 * 5. Call the physical function.
26228 */
26229- jmp *%ecx
26230+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
26231
26232-2:
26233 /*
26234 * 6. After EFI runtime service returns, control will return to
26235 * following instruction. We'd better readjust stack pointer first.
26236@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
26237 movl %cr0, %edx
26238 orl $0x80000000, %edx
26239 movl %edx, %cr0
26240- jmp 1f
26241-1:
26242+
26243 /*
26244 * 8. Now restore the virtual mode from flat mode by
26245 * adding EIP with PAGE_OFFSET.
26246 */
26247- movl $1f, %edx
26248- jmp *%edx
26249+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
26250 1:
26251+ movl $(__KERNEL_DS), %edx
26252+ mov %edx, %ds
26253+ mov %edx, %es
26254+ mov %edx, %ss
26255
26256 /*
26257 * 9. Balance the stack. And because EAX contain the return value,
26258 * we'd better not clobber it.
26259 */
26260- leal efi_rt_function_ptr, %edx
26261- movl (%edx), %ecx
26262- pushl %ecx
26263+ pushl (efi_rt_function_ptr)
26264
26265 /*
26266- * 10. Push the saved return address onto the stack and return.
26267+ * 10. Return to the saved return address.
26268 */
26269- leal saved_return_addr, %edx
26270- movl (%edx), %ecx
26271- pushl %ecx
26272- ret
26273+ jmpl *(saved_return_addr)
26274 ENDPROC(efi_call_phys)
26275 .previous
26276
26277-.data
26278+__INITDATA
26279 saved_return_addr:
26280 .long 0
26281 efi_rt_function_ptr:
26282diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26283index 4c07cca..2c8427d 100644
26284--- a/arch/x86/platform/efi/efi_stub_64.S
26285+++ b/arch/x86/platform/efi/efi_stub_64.S
26286@@ -7,6 +7,7 @@
26287 */
26288
26289 #include <linux/linkage.h>
26290+#include <asm/alternative-asm.h>
26291
26292 #define SAVE_XMM \
26293 mov %rsp, %rax; \
26294@@ -40,6 +41,7 @@ ENTRY(efi_call0)
26295 call *%rdi
26296 addq $32, %rsp
26297 RESTORE_XMM
26298+ pax_force_retaddr 0, 1
26299 ret
26300 ENDPROC(efi_call0)
26301
26302@@ -50,6 +52,7 @@ ENTRY(efi_call1)
26303 call *%rdi
26304 addq $32, %rsp
26305 RESTORE_XMM
26306+ pax_force_retaddr 0, 1
26307 ret
26308 ENDPROC(efi_call1)
26309
26310@@ -60,6 +63,7 @@ ENTRY(efi_call2)
26311 call *%rdi
26312 addq $32, %rsp
26313 RESTORE_XMM
26314+ pax_force_retaddr 0, 1
26315 ret
26316 ENDPROC(efi_call2)
26317
26318@@ -71,6 +75,7 @@ ENTRY(efi_call3)
26319 call *%rdi
26320 addq $32, %rsp
26321 RESTORE_XMM
26322+ pax_force_retaddr 0, 1
26323 ret
26324 ENDPROC(efi_call3)
26325
26326@@ -83,6 +88,7 @@ ENTRY(efi_call4)
26327 call *%rdi
26328 addq $32, %rsp
26329 RESTORE_XMM
26330+ pax_force_retaddr 0, 1
26331 ret
26332 ENDPROC(efi_call4)
26333
26334@@ -96,6 +102,7 @@ ENTRY(efi_call5)
26335 call *%rdi
26336 addq $48, %rsp
26337 RESTORE_XMM
26338+ pax_force_retaddr 0, 1
26339 ret
26340 ENDPROC(efi_call5)
26341
26342@@ -112,5 +119,6 @@ ENTRY(efi_call6)
26343 call *%rdi
26344 addq $48, %rsp
26345 RESTORE_XMM
26346+ pax_force_retaddr 0, 1
26347 ret
26348 ENDPROC(efi_call6)
26349diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26350index 475e2cd..1b8e708 100644
26351--- a/arch/x86/platform/mrst/mrst.c
26352+++ b/arch/x86/platform/mrst/mrst.c
26353@@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26354 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26355 int sfi_mrtc_num;
26356
26357-static void mrst_power_off(void)
26358+static __noreturn void mrst_power_off(void)
26359 {
26360 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
26361 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
26362+ BUG();
26363 }
26364
26365-static void mrst_reboot(void)
26366+static __noreturn void mrst_reboot(void)
26367 {
26368 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
26369 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
26370 else
26371 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26372+ BUG();
26373 }
26374
26375 /* parse all the mtimer info to a static mtimer array */
26376diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
26377index 3ae0e61..4202d86 100644
26378--- a/arch/x86/platform/uv/tlb_uv.c
26379+++ b/arch/x86/platform/uv/tlb_uv.c
26380@@ -1424,6 +1424,8 @@ static ssize_t tunables_read(struct file *file, char __user *userbuf,
26381 * 0: display meaning of the statistics
26382 */
26383 static ssize_t ptc_proc_write(struct file *file, const char __user *user,
26384+ size_t count, loff_t *data) __size_overflow(3);
26385+static ssize_t ptc_proc_write(struct file *file, const char __user *user,
26386 size_t count, loff_t *data)
26387 {
26388 int cpu;
26389@@ -1539,6 +1541,8 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
26390 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
26391 */
26392 static ssize_t tunables_write(struct file *file, const char __user *user,
26393+ size_t count, loff_t *data) __size_overflow(3);
26394+static ssize_t tunables_write(struct file *file, const char __user *user,
26395 size_t count, loff_t *data)
26396 {
26397 int cpu;
26398diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26399index f10c0af..3ec1f95 100644
26400--- a/arch/x86/power/cpu.c
26401+++ b/arch/x86/power/cpu.c
26402@@ -131,7 +131,7 @@ static void do_fpu_end(void)
26403 static void fix_processor_context(void)
26404 {
26405 int cpu = smp_processor_id();
26406- struct tss_struct *t = &per_cpu(init_tss, cpu);
26407+ struct tss_struct *t = init_tss + cpu;
26408
26409 set_tss_desc(cpu, t); /*
26410 * This just modifies memory; should not be
26411@@ -141,7 +141,9 @@ static void fix_processor_context(void)
26412 */
26413
26414 #ifdef CONFIG_X86_64
26415+ pax_open_kernel();
26416 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26417+ pax_close_kernel();
26418
26419 syscall_init(); /* This sets MSR_*STAR and related */
26420 #endif
26421diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26422index 5d17950..2253fc9 100644
26423--- a/arch/x86/vdso/Makefile
26424+++ b/arch/x86/vdso/Makefile
26425@@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
26426 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
26427 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
26428
26429-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26430+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26431 GCOV_PROFILE := n
26432
26433 #
26434diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26435index 468d591..8e80a0a 100644
26436--- a/arch/x86/vdso/vdso32-setup.c
26437+++ b/arch/x86/vdso/vdso32-setup.c
26438@@ -25,6 +25,7 @@
26439 #include <asm/tlbflush.h>
26440 #include <asm/vdso.h>
26441 #include <asm/proto.h>
26442+#include <asm/mman.h>
26443
26444 enum {
26445 VDSO_DISABLED = 0,
26446@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26447 void enable_sep_cpu(void)
26448 {
26449 int cpu = get_cpu();
26450- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26451+ struct tss_struct *tss = init_tss + cpu;
26452
26453 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26454 put_cpu();
26455@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26456 gate_vma.vm_start = FIXADDR_USER_START;
26457 gate_vma.vm_end = FIXADDR_USER_END;
26458 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26459- gate_vma.vm_page_prot = __P101;
26460+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26461 /*
26462 * Make sure the vDSO gets into every core dump.
26463 * Dumping its contents makes post-mortem fully interpretable later
26464@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26465 if (compat)
26466 addr = VDSO_HIGH_BASE;
26467 else {
26468- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26469+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26470 if (IS_ERR_VALUE(addr)) {
26471 ret = addr;
26472 goto up_fail;
26473 }
26474 }
26475
26476- current->mm->context.vdso = (void *)addr;
26477+ current->mm->context.vdso = addr;
26478
26479 if (compat_uses_vma || !compat) {
26480 /*
26481@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26482 }
26483
26484 current_thread_info()->sysenter_return =
26485- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26486+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26487
26488 up_fail:
26489 if (ret)
26490- current->mm->context.vdso = NULL;
26491+ current->mm->context.vdso = 0;
26492
26493 up_write(&mm->mmap_sem);
26494
26495@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
26496
26497 const char *arch_vma_name(struct vm_area_struct *vma)
26498 {
26499- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26500+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26501 return "[vdso]";
26502+
26503+#ifdef CONFIG_PAX_SEGMEXEC
26504+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26505+ return "[vdso]";
26506+#endif
26507+
26508 return NULL;
26509 }
26510
26511@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26512 * Check to see if the corresponding task was created in compat vdso
26513 * mode.
26514 */
26515- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26516+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26517 return &gate_vma;
26518 return NULL;
26519 }
26520diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26521index 153407c..611cba9 100644
26522--- a/arch/x86/vdso/vma.c
26523+++ b/arch/x86/vdso/vma.c
26524@@ -16,8 +16,6 @@
26525 #include <asm/vdso.h>
26526 #include <asm/page.h>
26527
26528-unsigned int __read_mostly vdso_enabled = 1;
26529-
26530 extern char vdso_start[], vdso_end[];
26531 extern unsigned short vdso_sync_cpuid;
26532
26533@@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26534 * unaligned here as a result of stack start randomization.
26535 */
26536 addr = PAGE_ALIGN(addr);
26537- addr = align_addr(addr, NULL, ALIGN_VDSO);
26538
26539 return addr;
26540 }
26541@@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26542 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26543 {
26544 struct mm_struct *mm = current->mm;
26545- unsigned long addr;
26546+ unsigned long addr = 0;
26547 int ret;
26548
26549- if (!vdso_enabled)
26550- return 0;
26551-
26552 down_write(&mm->mmap_sem);
26553+
26554+#ifdef CONFIG_PAX_RANDMMAP
26555+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26556+#endif
26557+
26558 addr = vdso_addr(mm->start_stack, vdso_size);
26559+ addr = align_addr(addr, NULL, ALIGN_VDSO);
26560 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26561 if (IS_ERR_VALUE(addr)) {
26562 ret = addr;
26563 goto up_fail;
26564 }
26565
26566- current->mm->context.vdso = (void *)addr;
26567+ mm->context.vdso = addr;
26568
26569 ret = install_special_mapping(mm, addr, vdso_size,
26570 VM_READ|VM_EXEC|
26571 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
26572 VM_ALWAYSDUMP,
26573 vdso_pages);
26574- if (ret) {
26575- current->mm->context.vdso = NULL;
26576- goto up_fail;
26577- }
26578+
26579+ if (ret)
26580+ mm->context.vdso = 0;
26581
26582 up_fail:
26583 up_write(&mm->mmap_sem);
26584 return ret;
26585 }
26586-
26587-static __init int vdso_setup(char *s)
26588-{
26589- vdso_enabled = simple_strtoul(s, NULL, 0);
26590- return 0;
26591-}
26592-__setup("vdso=", vdso_setup);
26593diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26594index 4172af8..2c8ed7f 100644
26595--- a/arch/x86/xen/enlighten.c
26596+++ b/arch/x86/xen/enlighten.c
26597@@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26598
26599 struct shared_info xen_dummy_shared_info;
26600
26601-void *xen_initial_gdt;
26602-
26603 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
26604 __read_mostly int xen_have_vector_callback;
26605 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
26606@@ -1029,30 +1027,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
26607 #endif
26608 };
26609
26610-static void xen_reboot(int reason)
26611+static __noreturn void xen_reboot(int reason)
26612 {
26613 struct sched_shutdown r = { .reason = reason };
26614
26615- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
26616- BUG();
26617+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
26618+ BUG();
26619 }
26620
26621-static void xen_restart(char *msg)
26622+static __noreturn void xen_restart(char *msg)
26623 {
26624 xen_reboot(SHUTDOWN_reboot);
26625 }
26626
26627-static void xen_emergency_restart(void)
26628+static __noreturn void xen_emergency_restart(void)
26629 {
26630 xen_reboot(SHUTDOWN_reboot);
26631 }
26632
26633-static void xen_machine_halt(void)
26634+static __noreturn void xen_machine_halt(void)
26635 {
26636 xen_reboot(SHUTDOWN_poweroff);
26637 }
26638
26639-static void xen_machine_power_off(void)
26640+static __noreturn void xen_machine_power_off(void)
26641 {
26642 if (pm_power_off)
26643 pm_power_off();
26644@@ -1155,7 +1153,17 @@ asmlinkage void __init xen_start_kernel(void)
26645 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26646
26647 /* Work out if we support NX */
26648- x86_configure_nx();
26649+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26650+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26651+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26652+ unsigned l, h;
26653+
26654+ __supported_pte_mask |= _PAGE_NX;
26655+ rdmsr(MSR_EFER, l, h);
26656+ l |= EFER_NX;
26657+ wrmsr(MSR_EFER, l, h);
26658+ }
26659+#endif
26660
26661 xen_setup_features();
26662
26663@@ -1186,13 +1194,6 @@ asmlinkage void __init xen_start_kernel(void)
26664
26665 machine_ops = xen_machine_ops;
26666
26667- /*
26668- * The only reliable way to retain the initial address of the
26669- * percpu gdt_page is to remember it here, so we can go and
26670- * mark it RW later, when the initial percpu area is freed.
26671- */
26672- xen_initial_gdt = &per_cpu(gdt_page, 0);
26673-
26674 xen_smp_init();
26675
26676 #ifdef CONFIG_ACPI_NUMA
26677diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
26678index 95c1cf6..4bfa5be 100644
26679--- a/arch/x86/xen/mmu.c
26680+++ b/arch/x86/xen/mmu.c
26681@@ -1733,6 +1733,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
26682 convert_pfn_mfn(init_level4_pgt);
26683 convert_pfn_mfn(level3_ident_pgt);
26684 convert_pfn_mfn(level3_kernel_pgt);
26685+ convert_pfn_mfn(level3_vmalloc_start_pgt);
26686+ convert_pfn_mfn(level3_vmalloc_end_pgt);
26687+ convert_pfn_mfn(level3_vmemmap_pgt);
26688
26689 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
26690 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
26691@@ -1751,7 +1754,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
26692 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
26693 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
26694 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
26695+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
26696+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
26697+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
26698 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
26699+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
26700 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
26701 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
26702
26703@@ -1958,6 +1965,7 @@ static void __init xen_post_allocator_init(void)
26704 pv_mmu_ops.set_pud = xen_set_pud;
26705 #if PAGETABLE_LEVELS == 4
26706 pv_mmu_ops.set_pgd = xen_set_pgd;
26707+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
26708 #endif
26709
26710 /* This will work as long as patching hasn't happened yet
26711@@ -2039,6 +2047,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
26712 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
26713 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
26714 .set_pgd = xen_set_pgd_hyper,
26715+ .set_pgd_batched = xen_set_pgd_hyper,
26716
26717 .alloc_pud = xen_alloc_pmd_init,
26718 .release_pud = xen_release_pmd_init,
26719diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
26720index 501d4e0..e877605 100644
26721--- a/arch/x86/xen/smp.c
26722+++ b/arch/x86/xen/smp.c
26723@@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
26724 {
26725 BUG_ON(smp_processor_id() != 0);
26726 native_smp_prepare_boot_cpu();
26727-
26728- /* We've switched to the "real" per-cpu gdt, so make sure the
26729- old memory can be recycled */
26730- make_lowmem_page_readwrite(xen_initial_gdt);
26731-
26732 xen_filter_cpu_maps();
26733 xen_setup_vcpu_info_placement();
26734 }
26735@@ -275,12 +270,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
26736 gdt = get_cpu_gdt_table(cpu);
26737
26738 ctxt->flags = VGCF_IN_KERNEL;
26739- ctxt->user_regs.ds = __USER_DS;
26740- ctxt->user_regs.es = __USER_DS;
26741+ ctxt->user_regs.ds = __KERNEL_DS;
26742+ ctxt->user_regs.es = __KERNEL_DS;
26743 ctxt->user_regs.ss = __KERNEL_DS;
26744 #ifdef CONFIG_X86_32
26745 ctxt->user_regs.fs = __KERNEL_PERCPU;
26746- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
26747+ savesegment(gs, ctxt->user_regs.gs);
26748 #else
26749 ctxt->gs_base_kernel = per_cpu_offset(cpu);
26750 #endif
26751@@ -331,13 +326,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
26752 int rc;
26753
26754 per_cpu(current_task, cpu) = idle;
26755+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
26756 #ifdef CONFIG_X86_32
26757 irq_ctx_init(cpu);
26758 #else
26759 clear_tsk_thread_flag(idle, TIF_FORK);
26760- per_cpu(kernel_stack, cpu) =
26761- (unsigned long)task_stack_page(idle) -
26762- KERNEL_STACK_OFFSET + THREAD_SIZE;
26763+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26764 #endif
26765 xen_setup_runstate_info(cpu);
26766 xen_setup_timer(cpu);
26767diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
26768index b040b0e..8cc4fe0 100644
26769--- a/arch/x86/xen/xen-asm_32.S
26770+++ b/arch/x86/xen/xen-asm_32.S
26771@@ -83,14 +83,14 @@ ENTRY(xen_iret)
26772 ESP_OFFSET=4 # bytes pushed onto stack
26773
26774 /*
26775- * Store vcpu_info pointer for easy access. Do it this way to
26776- * avoid having to reload %fs
26777+ * Store vcpu_info pointer for easy access.
26778 */
26779 #ifdef CONFIG_SMP
26780- GET_THREAD_INFO(%eax)
26781- movl TI_cpu(%eax), %eax
26782- movl __per_cpu_offset(,%eax,4), %eax
26783- mov xen_vcpu(%eax), %eax
26784+ push %fs
26785+ mov $(__KERNEL_PERCPU), %eax
26786+ mov %eax, %fs
26787+ mov PER_CPU_VAR(xen_vcpu), %eax
26788+ pop %fs
26789 #else
26790 movl xen_vcpu, %eax
26791 #endif
26792diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
26793index aaa7291..3f77960 100644
26794--- a/arch/x86/xen/xen-head.S
26795+++ b/arch/x86/xen/xen-head.S
26796@@ -19,6 +19,17 @@ ENTRY(startup_xen)
26797 #ifdef CONFIG_X86_32
26798 mov %esi,xen_start_info
26799 mov $init_thread_union+THREAD_SIZE,%esp
26800+#ifdef CONFIG_SMP
26801+ movl $cpu_gdt_table,%edi
26802+ movl $__per_cpu_load,%eax
26803+ movw %ax,__KERNEL_PERCPU + 2(%edi)
26804+ rorl $16,%eax
26805+ movb %al,__KERNEL_PERCPU + 4(%edi)
26806+ movb %ah,__KERNEL_PERCPU + 7(%edi)
26807+ movl $__per_cpu_end - 1,%eax
26808+ subl $__per_cpu_start,%eax
26809+ movw %ax,__KERNEL_PERCPU + 0(%edi)
26810+#endif
26811 #else
26812 mov %rsi,xen_start_info
26813 mov $init_thread_union+THREAD_SIZE,%rsp
26814diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
26815index b095739..8c17bcd 100644
26816--- a/arch/x86/xen/xen-ops.h
26817+++ b/arch/x86/xen/xen-ops.h
26818@@ -10,8 +10,6 @@
26819 extern const char xen_hypervisor_callback[];
26820 extern const char xen_failsafe_callback[];
26821
26822-extern void *xen_initial_gdt;
26823-
26824 struct trap_info;
26825 void xen_copy_trap_info(struct trap_info *traps);
26826
26827diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
26828index 525bd3d..ef888b1 100644
26829--- a/arch/xtensa/variants/dc232b/include/variant/core.h
26830+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
26831@@ -119,9 +119,9 @@
26832 ----------------------------------------------------------------------*/
26833
26834 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
26835-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
26836 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
26837 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
26838+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26839
26840 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
26841 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
26842diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
26843index 2f33760..835e50a 100644
26844--- a/arch/xtensa/variants/fsf/include/variant/core.h
26845+++ b/arch/xtensa/variants/fsf/include/variant/core.h
26846@@ -11,6 +11,7 @@
26847 #ifndef _XTENSA_CORE_H
26848 #define _XTENSA_CORE_H
26849
26850+#include <linux/const.h>
26851
26852 /****************************************************************************
26853 Parameters Useful for Any Code, USER or PRIVILEGED
26854@@ -112,9 +113,9 @@
26855 ----------------------------------------------------------------------*/
26856
26857 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
26858-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
26859 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
26860 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
26861+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26862
26863 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
26864 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
26865diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
26866index af00795..2bb8105 100644
26867--- a/arch/xtensa/variants/s6000/include/variant/core.h
26868+++ b/arch/xtensa/variants/s6000/include/variant/core.h
26869@@ -11,6 +11,7 @@
26870 #ifndef _XTENSA_CORE_CONFIGURATION_H
26871 #define _XTENSA_CORE_CONFIGURATION_H
26872
26873+#include <linux/const.h>
26874
26875 /****************************************************************************
26876 Parameters Useful for Any Code, USER or PRIVILEGED
26877@@ -118,9 +119,9 @@
26878 ----------------------------------------------------------------------*/
26879
26880 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
26881-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
26882 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
26883 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
26884+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
26885
26886 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
26887 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
26888diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
26889index 58916af..9cb880b 100644
26890--- a/block/blk-iopoll.c
26891+++ b/block/blk-iopoll.c
26892@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
26893 }
26894 EXPORT_SYMBOL(blk_iopoll_complete);
26895
26896-static void blk_iopoll_softirq(struct softirq_action *h)
26897+static void blk_iopoll_softirq(void)
26898 {
26899 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
26900 int rearm = 0, budget = blk_iopoll_budget;
26901diff --git a/block/blk-map.c b/block/blk-map.c
26902index 623e1cd..ca1e109 100644
26903--- a/block/blk-map.c
26904+++ b/block/blk-map.c
26905@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
26906 if (!len || !kbuf)
26907 return -EINVAL;
26908
26909- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
26910+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
26911 if (do_copy)
26912 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
26913 else
26914diff --git a/block/blk-softirq.c b/block/blk-softirq.c
26915index 1366a89..e17f54b 100644
26916--- a/block/blk-softirq.c
26917+++ b/block/blk-softirq.c
26918@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
26919 * Softirq action handler - move entries to local list and loop over them
26920 * while passing them to the queue registered handler.
26921 */
26922-static void blk_done_softirq(struct softirq_action *h)
26923+static void blk_done_softirq(void)
26924 {
26925 struct list_head *cpu_list, local_list;
26926
26927diff --git a/block/bsg.c b/block/bsg.c
26928index ff64ae3..593560c 100644
26929--- a/block/bsg.c
26930+++ b/block/bsg.c
26931@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
26932 struct sg_io_v4 *hdr, struct bsg_device *bd,
26933 fmode_t has_write_perm)
26934 {
26935+ unsigned char tmpcmd[sizeof(rq->__cmd)];
26936+ unsigned char *cmdptr;
26937+
26938 if (hdr->request_len > BLK_MAX_CDB) {
26939 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
26940 if (!rq->cmd)
26941 return -ENOMEM;
26942- }
26943+ cmdptr = rq->cmd;
26944+ } else
26945+ cmdptr = tmpcmd;
26946
26947- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
26948+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
26949 hdr->request_len))
26950 return -EFAULT;
26951
26952+ if (cmdptr != rq->cmd)
26953+ memcpy(rq->cmd, cmdptr, hdr->request_len);
26954+
26955 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
26956 if (blk_verify_command(rq->cmd, has_write_perm))
26957 return -EPERM;
26958diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
26959index 7c668c8..db3521c 100644
26960--- a/block/compat_ioctl.c
26961+++ b/block/compat_ioctl.c
26962@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
26963 err |= __get_user(f->spec1, &uf->spec1);
26964 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
26965 err |= __get_user(name, &uf->name);
26966- f->name = compat_ptr(name);
26967+ f->name = (void __force_kernel *)compat_ptr(name);
26968 if (err) {
26969 err = -EFAULT;
26970 goto out;
26971diff --git a/block/partitions/efi.c b/block/partitions/efi.c
26972index 6296b40..417c00f 100644
26973--- a/block/partitions/efi.c
26974+++ b/block/partitions/efi.c
26975@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
26976 if (!gpt)
26977 return NULL;
26978
26979+ if (!le32_to_cpu(gpt->num_partition_entries))
26980+ return NULL;
26981+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
26982+ if (!pte)
26983+ return NULL;
26984+
26985 count = le32_to_cpu(gpt->num_partition_entries) *
26986 le32_to_cpu(gpt->sizeof_partition_entry);
26987- if (!count)
26988- return NULL;
26989- pte = kzalloc(count, GFP_KERNEL);
26990- if (!pte)
26991- return NULL;
26992-
26993 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
26994 (u8 *) pte,
26995 count) < count) {
26996diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
26997index 260fa80..e8f3caf 100644
26998--- a/block/scsi_ioctl.c
26999+++ b/block/scsi_ioctl.c
27000@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27001 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27002 struct sg_io_hdr *hdr, fmode_t mode)
27003 {
27004- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27005+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27006+ unsigned char *cmdptr;
27007+
27008+ if (rq->cmd != rq->__cmd)
27009+ cmdptr = rq->cmd;
27010+ else
27011+ cmdptr = tmpcmd;
27012+
27013+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27014 return -EFAULT;
27015+
27016+ if (cmdptr != rq->cmd)
27017+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27018+
27019 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27020 return -EPERM;
27021
27022@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27023 int err;
27024 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27025 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27026+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27027+ unsigned char *cmdptr;
27028
27029 if (!sic)
27030 return -EINVAL;
27031@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27032 */
27033 err = -EFAULT;
27034 rq->cmd_len = cmdlen;
27035- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27036+
27037+ if (rq->cmd != rq->__cmd)
27038+ cmdptr = rq->cmd;
27039+ else
27040+ cmdptr = tmpcmd;
27041+
27042+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27043 goto error;
27044
27045+ if (rq->cmd != cmdptr)
27046+ memcpy(rq->cmd, cmdptr, cmdlen);
27047+
27048 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27049 goto error;
27050
27051diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
27052index a0f768c..1da9c73 100644
27053--- a/crypto/ablkcipher.c
27054+++ b/crypto/ablkcipher.c
27055@@ -307,6 +307,8 @@ int ablkcipher_walk_phys(struct ablkcipher_request *req,
27056 EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
27057
27058 static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
27059+ unsigned int keylen) __size_overflow(3);
27060+static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
27061 unsigned int keylen)
27062 {
27063 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
27064@@ -329,6 +331,8 @@ static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
27065 }
27066
27067 static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
27068+ unsigned int keylen) __size_overflow(3);
27069+static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
27070 unsigned int keylen)
27071 {
27072 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
27073diff --git a/crypto/aead.c b/crypto/aead.c
27074index 04add3dc..983032f 100644
27075--- a/crypto/aead.c
27076+++ b/crypto/aead.c
27077@@ -27,6 +27,8 @@
27078 #include "internal.h"
27079
27080 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
27081+ unsigned int keylen) __size_overflow(3);
27082+static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
27083 unsigned int keylen)
27084 {
27085 struct aead_alg *aead = crypto_aead_alg(tfm);
27086@@ -48,6 +50,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
27087 return ret;
27088 }
27089
27090+static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
27091 static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
27092 {
27093 struct aead_alg *aead = crypto_aead_alg(tfm);
27094diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
27095index 1e61d1a..cf06b86 100644
27096--- a/crypto/blkcipher.c
27097+++ b/crypto/blkcipher.c
27098@@ -359,6 +359,8 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
27099 EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
27100
27101 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27102+ unsigned int keylen) __size_overflow(3);
27103+static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27104 unsigned int keylen)
27105 {
27106 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
27107@@ -380,6 +382,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27108 return ret;
27109 }
27110
27111+static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
27112 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
27113 {
27114 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
27115diff --git a/crypto/cipher.c b/crypto/cipher.c
27116index 39541e0..802d956 100644
27117--- a/crypto/cipher.c
27118+++ b/crypto/cipher.c
27119@@ -21,6 +21,8 @@
27120 #include "internal.h"
27121
27122 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27123+ unsigned int keylen) __size_overflow(3);
27124+static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27125 unsigned int keylen)
27126 {
27127 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
27128@@ -43,6 +45,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27129
27130 }
27131
27132+static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
27133 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
27134 {
27135 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
27136diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27137index 671d4d6..5f24030 100644
27138--- a/crypto/cryptd.c
27139+++ b/crypto/cryptd.c
27140@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27141
27142 struct cryptd_blkcipher_request_ctx {
27143 crypto_completion_t complete;
27144-};
27145+} __no_const;
27146
27147 struct cryptd_hash_ctx {
27148 struct crypto_shash *child;
27149@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27150
27151 struct cryptd_aead_request_ctx {
27152 crypto_completion_t complete;
27153-};
27154+} __no_const;
27155
27156 static void cryptd_queue_worker(struct work_struct *work);
27157
27158diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27159index 5d41894..22021e4 100644
27160--- a/drivers/acpi/apei/cper.c
27161+++ b/drivers/acpi/apei/cper.c
27162@@ -38,12 +38,12 @@
27163 */
27164 u64 cper_next_record_id(void)
27165 {
27166- static atomic64_t seq;
27167+ static atomic64_unchecked_t seq;
27168
27169- if (!atomic64_read(&seq))
27170- atomic64_set(&seq, ((u64)get_seconds()) << 32);
27171+ if (!atomic64_read_unchecked(&seq))
27172+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27173
27174- return atomic64_inc_return(&seq);
27175+ return atomic64_inc_return_unchecked(&seq);
27176 }
27177 EXPORT_SYMBOL_GPL(cper_next_record_id);
27178
27179diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27180index 86933ca..5cb1a69 100644
27181--- a/drivers/acpi/battery.c
27182+++ b/drivers/acpi/battery.c
27183@@ -787,6 +787,9 @@ static int acpi_battery_print_alarm(struct seq_file *seq, int result)
27184
27185 static ssize_t acpi_battery_write_alarm(struct file *file,
27186 const char __user * buffer,
27187+ size_t count, loff_t * ppos) __size_overflow(3);
27188+static ssize_t acpi_battery_write_alarm(struct file *file,
27189+ const char __user * buffer,
27190 size_t count, loff_t * ppos)
27191 {
27192 int result = 0;
27193diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27194index b258cab..3fb7da7 100644
27195--- a/drivers/acpi/ec_sys.c
27196+++ b/drivers/acpi/ec_sys.c
27197@@ -12,6 +12,7 @@
27198 #include <linux/acpi.h>
27199 #include <linux/debugfs.h>
27200 #include <linux/module.h>
27201+#include <linux/uaccess.h>
27202 #include "internal.h"
27203
27204 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27205@@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27206 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27207 */
27208 unsigned int size = EC_SPACE_SIZE;
27209- u8 *data = (u8 *) buf;
27210+ u8 data;
27211 loff_t init_off = *off;
27212 int err = 0;
27213
27214@@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27215 size = count;
27216
27217 while (size) {
27218- err = ec_read(*off, &data[*off - init_off]);
27219+ err = ec_read(*off, &data);
27220 if (err)
27221 return err;
27222+ if (put_user(data, &buf[*off - init_off]))
27223+ return -EFAULT;
27224 *off += 1;
27225 size--;
27226 }
27227@@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27228
27229 unsigned int size = count;
27230 loff_t init_off = *off;
27231- u8 *data = (u8 *) buf;
27232 int err = 0;
27233
27234 if (*off >= EC_SPACE_SIZE)
27235@@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27236 }
27237
27238 while (size) {
27239- u8 byte_write = data[*off - init_off];
27240+ u8 byte_write;
27241+ if (get_user(byte_write, &buf[*off - init_off]))
27242+ return -EFAULT;
27243 err = ec_write(*off, byte_write);
27244 if (err)
27245 return err;
27246diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27247index 251c7b62..000462d 100644
27248--- a/drivers/acpi/proc.c
27249+++ b/drivers/acpi/proc.c
27250@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27251 size_t count, loff_t * ppos)
27252 {
27253 struct list_head *node, *next;
27254- char strbuf[5];
27255- char str[5] = "";
27256- unsigned int len = count;
27257+ char strbuf[5] = {0};
27258
27259- if (len > 4)
27260- len = 4;
27261- if (len < 0)
27262+ if (count > 4)
27263+ count = 4;
27264+ if (copy_from_user(strbuf, buffer, count))
27265 return -EFAULT;
27266-
27267- if (copy_from_user(strbuf, buffer, len))
27268- return -EFAULT;
27269- strbuf[len] = '\0';
27270- sscanf(strbuf, "%s", str);
27271+ strbuf[count] = '\0';
27272
27273 mutex_lock(&acpi_device_lock);
27274 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27275@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27276 if (!dev->wakeup.flags.valid)
27277 continue;
27278
27279- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27280+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27281 if (device_can_wakeup(&dev->dev)) {
27282 bool enable = !device_may_wakeup(&dev->dev);
27283 device_set_wakeup_enable(&dev->dev, enable);
27284diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27285index 8ae05ce..7dbbed9 100644
27286--- a/drivers/acpi/processor_driver.c
27287+++ b/drivers/acpi/processor_driver.c
27288@@ -555,7 +555,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27289 return 0;
27290 #endif
27291
27292- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27293+ BUG_ON(pr->id >= nr_cpu_ids);
27294
27295 /*
27296 * Buggy BIOS check
27297diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
27298index 6e36d0c..f319944 100644
27299--- a/drivers/acpi/sbs.c
27300+++ b/drivers/acpi/sbs.c
27301@@ -655,6 +655,9 @@ static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
27302
27303 static ssize_t
27304 acpi_battery_write_alarm(struct file *file, const char __user * buffer,
27305+ size_t count, loff_t * ppos) __size_overflow(3);
27306+static ssize_t
27307+acpi_battery_write_alarm(struct file *file, const char __user * buffer,
27308 size_t count, loff_t * ppos)
27309 {
27310 struct seq_file *seq = file->private_data;
27311diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27312index c06e0ec..a2c06ba 100644
27313--- a/drivers/ata/libata-core.c
27314+++ b/drivers/ata/libata-core.c
27315@@ -4736,7 +4736,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27316 struct ata_port *ap;
27317 unsigned int tag;
27318
27319- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27320+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27321 ap = qc->ap;
27322
27323 qc->flags = 0;
27324@@ -4752,7 +4752,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27325 struct ata_port *ap;
27326 struct ata_link *link;
27327
27328- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27329+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27330 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27331 ap = qc->ap;
27332 link = qc->dev->link;
27333@@ -5816,6 +5816,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27334 return;
27335
27336 spin_lock(&lock);
27337+ pax_open_kernel();
27338
27339 for (cur = ops->inherits; cur; cur = cur->inherits) {
27340 void **inherit = (void **)cur;
27341@@ -5829,8 +5830,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27342 if (IS_ERR(*pp))
27343 *pp = NULL;
27344
27345- ops->inherits = NULL;
27346+ *(struct ata_port_operations **)&ops->inherits = NULL;
27347
27348+ pax_close_kernel();
27349 spin_unlock(&lock);
27350 }
27351
27352diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27353index 048589f..4002b98 100644
27354--- a/drivers/ata/pata_arasan_cf.c
27355+++ b/drivers/ata/pata_arasan_cf.c
27356@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27357 /* Handle platform specific quirks */
27358 if (pdata->quirk) {
27359 if (pdata->quirk & CF_BROKEN_PIO) {
27360- ap->ops->set_piomode = NULL;
27361+ pax_open_kernel();
27362+ *(void **)&ap->ops->set_piomode = NULL;
27363+ pax_close_kernel();
27364 ap->pio_mask = 0;
27365 }
27366 if (pdata->quirk & CF_BROKEN_MWDMA)
27367diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27368index f9b983a..887b9d8 100644
27369--- a/drivers/atm/adummy.c
27370+++ b/drivers/atm/adummy.c
27371@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27372 vcc->pop(vcc, skb);
27373 else
27374 dev_kfree_skb_any(skb);
27375- atomic_inc(&vcc->stats->tx);
27376+ atomic_inc_unchecked(&vcc->stats->tx);
27377
27378 return 0;
27379 }
27380diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27381index f8f41e0..1f987dd 100644
27382--- a/drivers/atm/ambassador.c
27383+++ b/drivers/atm/ambassador.c
27384@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27385 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27386
27387 // VC layer stats
27388- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27389+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27390
27391 // free the descriptor
27392 kfree (tx_descr);
27393@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27394 dump_skb ("<<<", vc, skb);
27395
27396 // VC layer stats
27397- atomic_inc(&atm_vcc->stats->rx);
27398+ atomic_inc_unchecked(&atm_vcc->stats->rx);
27399 __net_timestamp(skb);
27400 // end of our responsibility
27401 atm_vcc->push (atm_vcc, skb);
27402@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27403 } else {
27404 PRINTK (KERN_INFO, "dropped over-size frame");
27405 // should we count this?
27406- atomic_inc(&atm_vcc->stats->rx_drop);
27407+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27408 }
27409
27410 } else {
27411@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27412 }
27413
27414 if (check_area (skb->data, skb->len)) {
27415- atomic_inc(&atm_vcc->stats->tx_err);
27416+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27417 return -ENOMEM; // ?
27418 }
27419
27420diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27421index b22d71c..d6e1049 100644
27422--- a/drivers/atm/atmtcp.c
27423+++ b/drivers/atm/atmtcp.c
27424@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27425 if (vcc->pop) vcc->pop(vcc,skb);
27426 else dev_kfree_skb(skb);
27427 if (dev_data) return 0;
27428- atomic_inc(&vcc->stats->tx_err);
27429+ atomic_inc_unchecked(&vcc->stats->tx_err);
27430 return -ENOLINK;
27431 }
27432 size = skb->len+sizeof(struct atmtcp_hdr);
27433@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27434 if (!new_skb) {
27435 if (vcc->pop) vcc->pop(vcc,skb);
27436 else dev_kfree_skb(skb);
27437- atomic_inc(&vcc->stats->tx_err);
27438+ atomic_inc_unchecked(&vcc->stats->tx_err);
27439 return -ENOBUFS;
27440 }
27441 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27442@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27443 if (vcc->pop) vcc->pop(vcc,skb);
27444 else dev_kfree_skb(skb);
27445 out_vcc->push(out_vcc,new_skb);
27446- atomic_inc(&vcc->stats->tx);
27447- atomic_inc(&out_vcc->stats->rx);
27448+ atomic_inc_unchecked(&vcc->stats->tx);
27449+ atomic_inc_unchecked(&out_vcc->stats->rx);
27450 return 0;
27451 }
27452
27453@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27454 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27455 read_unlock(&vcc_sklist_lock);
27456 if (!out_vcc) {
27457- atomic_inc(&vcc->stats->tx_err);
27458+ atomic_inc_unchecked(&vcc->stats->tx_err);
27459 goto done;
27460 }
27461 skb_pull(skb,sizeof(struct atmtcp_hdr));
27462@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27463 __net_timestamp(new_skb);
27464 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27465 out_vcc->push(out_vcc,new_skb);
27466- atomic_inc(&vcc->stats->tx);
27467- atomic_inc(&out_vcc->stats->rx);
27468+ atomic_inc_unchecked(&vcc->stats->tx);
27469+ atomic_inc_unchecked(&out_vcc->stats->rx);
27470 done:
27471 if (vcc->pop) vcc->pop(vcc,skb);
27472 else dev_kfree_skb(skb);
27473diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
27474index 956e9ac..133516d 100644
27475--- a/drivers/atm/eni.c
27476+++ b/drivers/atm/eni.c
27477@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
27478 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27479 vcc->dev->number);
27480 length = 0;
27481- atomic_inc(&vcc->stats->rx_err);
27482+ atomic_inc_unchecked(&vcc->stats->rx_err);
27483 }
27484 else {
27485 length = ATM_CELL_SIZE-1; /* no HEC */
27486@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27487 size);
27488 }
27489 eff = length = 0;
27490- atomic_inc(&vcc->stats->rx_err);
27491+ atomic_inc_unchecked(&vcc->stats->rx_err);
27492 }
27493 else {
27494 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
27495@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27496 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27497 vcc->dev->number,vcc->vci,length,size << 2,descr);
27498 length = eff = 0;
27499- atomic_inc(&vcc->stats->rx_err);
27500+ atomic_inc_unchecked(&vcc->stats->rx_err);
27501 }
27502 }
27503 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
27504@@ -771,7 +771,7 @@ rx_dequeued++;
27505 vcc->push(vcc,skb);
27506 pushed++;
27507 }
27508- atomic_inc(&vcc->stats->rx);
27509+ atomic_inc_unchecked(&vcc->stats->rx);
27510 }
27511 wake_up(&eni_dev->rx_wait);
27512 }
27513@@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
27514 PCI_DMA_TODEVICE);
27515 if (vcc->pop) vcc->pop(vcc,skb);
27516 else dev_kfree_skb_irq(skb);
27517- atomic_inc(&vcc->stats->tx);
27518+ atomic_inc_unchecked(&vcc->stats->tx);
27519 wake_up(&eni_dev->tx_wait);
27520 dma_complete++;
27521 }
27522@@ -1569,7 +1569,7 @@ tx_complete++;
27523 /*--------------------------------- entries ---------------------------------*/
27524
27525
27526-static const char *media_name[] __devinitdata = {
27527+static const char *media_name[] __devinitconst = {
27528 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
27529 "UTP", "05?", "06?", "07?", /* 4- 7 */
27530 "TAXI","09?", "10?", "11?", /* 8-11 */
27531diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
27532index 5072f8a..fa52520d 100644
27533--- a/drivers/atm/firestream.c
27534+++ b/drivers/atm/firestream.c
27535@@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
27536 }
27537 }
27538
27539- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27540+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27541
27542 fs_dprintk (FS_DEBUG_TXMEM, "i");
27543 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
27544@@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27545 #endif
27546 skb_put (skb, qe->p1 & 0xffff);
27547 ATM_SKB(skb)->vcc = atm_vcc;
27548- atomic_inc(&atm_vcc->stats->rx);
27549+ atomic_inc_unchecked(&atm_vcc->stats->rx);
27550 __net_timestamp(skb);
27551 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
27552 atm_vcc->push (atm_vcc, skb);
27553@@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27554 kfree (pe);
27555 }
27556 if (atm_vcc)
27557- atomic_inc(&atm_vcc->stats->rx_drop);
27558+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27559 break;
27560 case 0x1f: /* Reassembly abort: no buffers. */
27561 /* Silently increment error counter. */
27562 if (atm_vcc)
27563- atomic_inc(&atm_vcc->stats->rx_drop);
27564+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27565 break;
27566 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
27567 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
27568diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
27569index 361f5ae..7fc552d 100644
27570--- a/drivers/atm/fore200e.c
27571+++ b/drivers/atm/fore200e.c
27572@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
27573 #endif
27574 /* check error condition */
27575 if (*entry->status & STATUS_ERROR)
27576- atomic_inc(&vcc->stats->tx_err);
27577+ atomic_inc_unchecked(&vcc->stats->tx_err);
27578 else
27579- atomic_inc(&vcc->stats->tx);
27580+ atomic_inc_unchecked(&vcc->stats->tx);
27581 }
27582 }
27583
27584@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27585 if (skb == NULL) {
27586 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
27587
27588- atomic_inc(&vcc->stats->rx_drop);
27589+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27590 return -ENOMEM;
27591 }
27592
27593@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27594
27595 dev_kfree_skb_any(skb);
27596
27597- atomic_inc(&vcc->stats->rx_drop);
27598+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27599 return -ENOMEM;
27600 }
27601
27602 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27603
27604 vcc->push(vcc, skb);
27605- atomic_inc(&vcc->stats->rx);
27606+ atomic_inc_unchecked(&vcc->stats->rx);
27607
27608 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27609
27610@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
27611 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27612 fore200e->atm_dev->number,
27613 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27614- atomic_inc(&vcc->stats->rx_err);
27615+ atomic_inc_unchecked(&vcc->stats->rx_err);
27616 }
27617 }
27618
27619@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
27620 goto retry_here;
27621 }
27622
27623- atomic_inc(&vcc->stats->tx_err);
27624+ atomic_inc_unchecked(&vcc->stats->tx_err);
27625
27626 fore200e->tx_sat++;
27627 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
27628diff --git a/drivers/atm/he.c b/drivers/atm/he.c
27629index b182c2f..1c6fa8a 100644
27630--- a/drivers/atm/he.c
27631+++ b/drivers/atm/he.c
27632@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27633
27634 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
27635 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
27636- atomic_inc(&vcc->stats->rx_drop);
27637+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27638 goto return_host_buffers;
27639 }
27640
27641@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27642 RBRQ_LEN_ERR(he_dev->rbrq_head)
27643 ? "LEN_ERR" : "",
27644 vcc->vpi, vcc->vci);
27645- atomic_inc(&vcc->stats->rx_err);
27646+ atomic_inc_unchecked(&vcc->stats->rx_err);
27647 goto return_host_buffers;
27648 }
27649
27650@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27651 vcc->push(vcc, skb);
27652 spin_lock(&he_dev->global_lock);
27653
27654- atomic_inc(&vcc->stats->rx);
27655+ atomic_inc_unchecked(&vcc->stats->rx);
27656
27657 return_host_buffers:
27658 ++pdus_assembled;
27659@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
27660 tpd->vcc->pop(tpd->vcc, tpd->skb);
27661 else
27662 dev_kfree_skb_any(tpd->skb);
27663- atomic_inc(&tpd->vcc->stats->tx_err);
27664+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
27665 }
27666 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
27667 return;
27668@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27669 vcc->pop(vcc, skb);
27670 else
27671 dev_kfree_skb_any(skb);
27672- atomic_inc(&vcc->stats->tx_err);
27673+ atomic_inc_unchecked(&vcc->stats->tx_err);
27674 return -EINVAL;
27675 }
27676
27677@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27678 vcc->pop(vcc, skb);
27679 else
27680 dev_kfree_skb_any(skb);
27681- atomic_inc(&vcc->stats->tx_err);
27682+ atomic_inc_unchecked(&vcc->stats->tx_err);
27683 return -EINVAL;
27684 }
27685 #endif
27686@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27687 vcc->pop(vcc, skb);
27688 else
27689 dev_kfree_skb_any(skb);
27690- atomic_inc(&vcc->stats->tx_err);
27691+ atomic_inc_unchecked(&vcc->stats->tx_err);
27692 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27693 return -ENOMEM;
27694 }
27695@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27696 vcc->pop(vcc, skb);
27697 else
27698 dev_kfree_skb_any(skb);
27699- atomic_inc(&vcc->stats->tx_err);
27700+ atomic_inc_unchecked(&vcc->stats->tx_err);
27701 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27702 return -ENOMEM;
27703 }
27704@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27705 __enqueue_tpd(he_dev, tpd, cid);
27706 spin_unlock_irqrestore(&he_dev->global_lock, flags);
27707
27708- atomic_inc(&vcc->stats->tx);
27709+ atomic_inc_unchecked(&vcc->stats->tx);
27710
27711 return 0;
27712 }
27713diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
27714index b812103..e391a49 100644
27715--- a/drivers/atm/horizon.c
27716+++ b/drivers/atm/horizon.c
27717@@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
27718 {
27719 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
27720 // VC layer stats
27721- atomic_inc(&vcc->stats->rx);
27722+ atomic_inc_unchecked(&vcc->stats->rx);
27723 __net_timestamp(skb);
27724 // end of our responsibility
27725 vcc->push (vcc, skb);
27726@@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
27727 dev->tx_iovec = NULL;
27728
27729 // VC layer stats
27730- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27731+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27732
27733 // free the skb
27734 hrz_kfree_skb (skb);
27735diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
27736index 1c05212..c28e200 100644
27737--- a/drivers/atm/idt77252.c
27738+++ b/drivers/atm/idt77252.c
27739@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
27740 else
27741 dev_kfree_skb(skb);
27742
27743- atomic_inc(&vcc->stats->tx);
27744+ atomic_inc_unchecked(&vcc->stats->tx);
27745 }
27746
27747 atomic_dec(&scq->used);
27748@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27749 if ((sb = dev_alloc_skb(64)) == NULL) {
27750 printk("%s: Can't allocate buffers for aal0.\n",
27751 card->name);
27752- atomic_add(i, &vcc->stats->rx_drop);
27753+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
27754 break;
27755 }
27756 if (!atm_charge(vcc, sb->truesize)) {
27757 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
27758 card->name);
27759- atomic_add(i - 1, &vcc->stats->rx_drop);
27760+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
27761 dev_kfree_skb(sb);
27762 break;
27763 }
27764@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27765 ATM_SKB(sb)->vcc = vcc;
27766 __net_timestamp(sb);
27767 vcc->push(vcc, sb);
27768- atomic_inc(&vcc->stats->rx);
27769+ atomic_inc_unchecked(&vcc->stats->rx);
27770
27771 cell += ATM_CELL_PAYLOAD;
27772 }
27773@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27774 "(CDC: %08x)\n",
27775 card->name, len, rpp->len, readl(SAR_REG_CDC));
27776 recycle_rx_pool_skb(card, rpp);
27777- atomic_inc(&vcc->stats->rx_err);
27778+ atomic_inc_unchecked(&vcc->stats->rx_err);
27779 return;
27780 }
27781 if (stat & SAR_RSQE_CRC) {
27782 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
27783 recycle_rx_pool_skb(card, rpp);
27784- atomic_inc(&vcc->stats->rx_err);
27785+ atomic_inc_unchecked(&vcc->stats->rx_err);
27786 return;
27787 }
27788 if (skb_queue_len(&rpp->queue) > 1) {
27789@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27790 RXPRINTK("%s: Can't alloc RX skb.\n",
27791 card->name);
27792 recycle_rx_pool_skb(card, rpp);
27793- atomic_inc(&vcc->stats->rx_err);
27794+ atomic_inc_unchecked(&vcc->stats->rx_err);
27795 return;
27796 }
27797 if (!atm_charge(vcc, skb->truesize)) {
27798@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27799 __net_timestamp(skb);
27800
27801 vcc->push(vcc, skb);
27802- atomic_inc(&vcc->stats->rx);
27803+ atomic_inc_unchecked(&vcc->stats->rx);
27804
27805 return;
27806 }
27807@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
27808 __net_timestamp(skb);
27809
27810 vcc->push(vcc, skb);
27811- atomic_inc(&vcc->stats->rx);
27812+ atomic_inc_unchecked(&vcc->stats->rx);
27813
27814 if (skb->truesize > SAR_FB_SIZE_3)
27815 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
27816@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
27817 if (vcc->qos.aal != ATM_AAL0) {
27818 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
27819 card->name, vpi, vci);
27820- atomic_inc(&vcc->stats->rx_drop);
27821+ atomic_inc_unchecked(&vcc->stats->rx_drop);
27822 goto drop;
27823 }
27824
27825 if ((sb = dev_alloc_skb(64)) == NULL) {
27826 printk("%s: Can't allocate buffers for AAL0.\n",
27827 card->name);
27828- atomic_inc(&vcc->stats->rx_err);
27829+ atomic_inc_unchecked(&vcc->stats->rx_err);
27830 goto drop;
27831 }
27832
27833@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
27834 ATM_SKB(sb)->vcc = vcc;
27835 __net_timestamp(sb);
27836 vcc->push(vcc, sb);
27837- atomic_inc(&vcc->stats->rx);
27838+ atomic_inc_unchecked(&vcc->stats->rx);
27839
27840 drop:
27841 skb_pull(queue, 64);
27842@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27843
27844 if (vc == NULL) {
27845 printk("%s: NULL connection in send().\n", card->name);
27846- atomic_inc(&vcc->stats->tx_err);
27847+ atomic_inc_unchecked(&vcc->stats->tx_err);
27848 dev_kfree_skb(skb);
27849 return -EINVAL;
27850 }
27851 if (!test_bit(VCF_TX, &vc->flags)) {
27852 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
27853- atomic_inc(&vcc->stats->tx_err);
27854+ atomic_inc_unchecked(&vcc->stats->tx_err);
27855 dev_kfree_skb(skb);
27856 return -EINVAL;
27857 }
27858@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27859 break;
27860 default:
27861 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
27862- atomic_inc(&vcc->stats->tx_err);
27863+ atomic_inc_unchecked(&vcc->stats->tx_err);
27864 dev_kfree_skb(skb);
27865 return -EINVAL;
27866 }
27867
27868 if (skb_shinfo(skb)->nr_frags != 0) {
27869 printk("%s: No scatter-gather yet.\n", card->name);
27870- atomic_inc(&vcc->stats->tx_err);
27871+ atomic_inc_unchecked(&vcc->stats->tx_err);
27872 dev_kfree_skb(skb);
27873 return -EINVAL;
27874 }
27875@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
27876
27877 err = queue_skb(card, vc, skb, oam);
27878 if (err) {
27879- atomic_inc(&vcc->stats->tx_err);
27880+ atomic_inc_unchecked(&vcc->stats->tx_err);
27881 dev_kfree_skb(skb);
27882 return err;
27883 }
27884@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
27885 skb = dev_alloc_skb(64);
27886 if (!skb) {
27887 printk("%s: Out of memory in send_oam().\n", card->name);
27888- atomic_inc(&vcc->stats->tx_err);
27889+ atomic_inc_unchecked(&vcc->stats->tx_err);
27890 return -ENOMEM;
27891 }
27892 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
27893diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
27894index 9e373ba..cf93727 100644
27895--- a/drivers/atm/iphase.c
27896+++ b/drivers/atm/iphase.c
27897@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
27898 status = (u_short) (buf_desc_ptr->desc_mode);
27899 if (status & (RX_CER | RX_PTE | RX_OFL))
27900 {
27901- atomic_inc(&vcc->stats->rx_err);
27902+ atomic_inc_unchecked(&vcc->stats->rx_err);
27903 IF_ERR(printk("IA: bad packet, dropping it");)
27904 if (status & RX_CER) {
27905 IF_ERR(printk(" cause: packet CRC error\n");)
27906@@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
27907 len = dma_addr - buf_addr;
27908 if (len > iadev->rx_buf_sz) {
27909 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
27910- atomic_inc(&vcc->stats->rx_err);
27911+ atomic_inc_unchecked(&vcc->stats->rx_err);
27912 goto out_free_desc;
27913 }
27914
27915@@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27916 ia_vcc = INPH_IA_VCC(vcc);
27917 if (ia_vcc == NULL)
27918 {
27919- atomic_inc(&vcc->stats->rx_err);
27920+ atomic_inc_unchecked(&vcc->stats->rx_err);
27921 atm_return(vcc, skb->truesize);
27922 dev_kfree_skb_any(skb);
27923 goto INCR_DLE;
27924@@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27925 if ((length > iadev->rx_buf_sz) || (length >
27926 (skb->len - sizeof(struct cpcs_trailer))))
27927 {
27928- atomic_inc(&vcc->stats->rx_err);
27929+ atomic_inc_unchecked(&vcc->stats->rx_err);
27930 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
27931 length, skb->len);)
27932 atm_return(vcc, skb->truesize);
27933@@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
27934
27935 IF_RX(printk("rx_dle_intr: skb push");)
27936 vcc->push(vcc,skb);
27937- atomic_inc(&vcc->stats->rx);
27938+ atomic_inc_unchecked(&vcc->stats->rx);
27939 iadev->rx_pkt_cnt++;
27940 }
27941 INCR_DLE:
27942@@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
27943 {
27944 struct k_sonet_stats *stats;
27945 stats = &PRIV(_ia_dev[board])->sonet_stats;
27946- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
27947- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
27948- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
27949- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
27950- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
27951- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
27952- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
27953- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
27954- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
27955+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
27956+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
27957+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
27958+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
27959+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
27960+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
27961+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
27962+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
27963+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
27964 }
27965 ia_cmds.status = 0;
27966 break;
27967@@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
27968 if ((desc == 0) || (desc > iadev->num_tx_desc))
27969 {
27970 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
27971- atomic_inc(&vcc->stats->tx);
27972+ atomic_inc_unchecked(&vcc->stats->tx);
27973 if (vcc->pop)
27974 vcc->pop(vcc, skb);
27975 else
27976@@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
27977 ATM_DESC(skb) = vcc->vci;
27978 skb_queue_tail(&iadev->tx_dma_q, skb);
27979
27980- atomic_inc(&vcc->stats->tx);
27981+ atomic_inc_unchecked(&vcc->stats->tx);
27982 iadev->tx_pkt_cnt++;
27983 /* Increment transaction counter */
27984 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
27985
27986 #if 0
27987 /* add flow control logic */
27988- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
27989+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
27990 if (iavcc->vc_desc_cnt > 10) {
27991 vcc->tx_quota = vcc->tx_quota * 3 / 4;
27992 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
27993diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
27994index f556969..0da15eb 100644
27995--- a/drivers/atm/lanai.c
27996+++ b/drivers/atm/lanai.c
27997@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
27998 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
27999 lanai_endtx(lanai, lvcc);
28000 lanai_free_skb(lvcc->tx.atmvcc, skb);
28001- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28002+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28003 }
28004
28005 /* Try to fill the buffer - don't call unless there is backlog */
28006@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28007 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28008 __net_timestamp(skb);
28009 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28010- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28011+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28012 out:
28013 lvcc->rx.buf.ptr = end;
28014 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28015@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28016 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28017 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28018 lanai->stats.service_rxnotaal5++;
28019- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28020+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28021 return 0;
28022 }
28023 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28024@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28025 int bytes;
28026 read_unlock(&vcc_sklist_lock);
28027 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28028- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28029+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28030 lvcc->stats.x.aal5.service_trash++;
28031 bytes = (SERVICE_GET_END(s) * 16) -
28032 (((unsigned long) lvcc->rx.buf.ptr) -
28033@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28034 }
28035 if (s & SERVICE_STREAM) {
28036 read_unlock(&vcc_sklist_lock);
28037- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28038+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28039 lvcc->stats.x.aal5.service_stream++;
28040 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28041 "PDU on VCI %d!\n", lanai->number, vci);
28042@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28043 return 0;
28044 }
28045 DPRINTK("got rx crc error on vci %d\n", vci);
28046- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28047+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28048 lvcc->stats.x.aal5.service_rxcrc++;
28049 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28050 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28051diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28052index 1c70c45..300718d 100644
28053--- a/drivers/atm/nicstar.c
28054+++ b/drivers/atm/nicstar.c
28055@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28056 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28057 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28058 card->index);
28059- atomic_inc(&vcc->stats->tx_err);
28060+ atomic_inc_unchecked(&vcc->stats->tx_err);
28061 dev_kfree_skb_any(skb);
28062 return -EINVAL;
28063 }
28064@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28065 if (!vc->tx) {
28066 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28067 card->index);
28068- atomic_inc(&vcc->stats->tx_err);
28069+ atomic_inc_unchecked(&vcc->stats->tx_err);
28070 dev_kfree_skb_any(skb);
28071 return -EINVAL;
28072 }
28073@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28074 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28075 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28076 card->index);
28077- atomic_inc(&vcc->stats->tx_err);
28078+ atomic_inc_unchecked(&vcc->stats->tx_err);
28079 dev_kfree_skb_any(skb);
28080 return -EINVAL;
28081 }
28082
28083 if (skb_shinfo(skb)->nr_frags != 0) {
28084 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28085- atomic_inc(&vcc->stats->tx_err);
28086+ atomic_inc_unchecked(&vcc->stats->tx_err);
28087 dev_kfree_skb_any(skb);
28088 return -EINVAL;
28089 }
28090@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28091 }
28092
28093 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28094- atomic_inc(&vcc->stats->tx_err);
28095+ atomic_inc_unchecked(&vcc->stats->tx_err);
28096 dev_kfree_skb_any(skb);
28097 return -EIO;
28098 }
28099- atomic_inc(&vcc->stats->tx);
28100+ atomic_inc_unchecked(&vcc->stats->tx);
28101
28102 return 0;
28103 }
28104@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28105 printk
28106 ("nicstar%d: Can't allocate buffers for aal0.\n",
28107 card->index);
28108- atomic_add(i, &vcc->stats->rx_drop);
28109+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28110 break;
28111 }
28112 if (!atm_charge(vcc, sb->truesize)) {
28113 RXPRINTK
28114 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28115 card->index);
28116- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28117+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28118 dev_kfree_skb_any(sb);
28119 break;
28120 }
28121@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28122 ATM_SKB(sb)->vcc = vcc;
28123 __net_timestamp(sb);
28124 vcc->push(vcc, sb);
28125- atomic_inc(&vcc->stats->rx);
28126+ atomic_inc_unchecked(&vcc->stats->rx);
28127 cell += ATM_CELL_PAYLOAD;
28128 }
28129
28130@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28131 if (iovb == NULL) {
28132 printk("nicstar%d: Out of iovec buffers.\n",
28133 card->index);
28134- atomic_inc(&vcc->stats->rx_drop);
28135+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28136 recycle_rx_buf(card, skb);
28137 return;
28138 }
28139@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28140 small or large buffer itself. */
28141 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28142 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28143- atomic_inc(&vcc->stats->rx_err);
28144+ atomic_inc_unchecked(&vcc->stats->rx_err);
28145 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28146 NS_MAX_IOVECS);
28147 NS_PRV_IOVCNT(iovb) = 0;
28148@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28149 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28150 card->index);
28151 which_list(card, skb);
28152- atomic_inc(&vcc->stats->rx_err);
28153+ atomic_inc_unchecked(&vcc->stats->rx_err);
28154 recycle_rx_buf(card, skb);
28155 vc->rx_iov = NULL;
28156 recycle_iov_buf(card, iovb);
28157@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28158 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28159 card->index);
28160 which_list(card, skb);
28161- atomic_inc(&vcc->stats->rx_err);
28162+ atomic_inc_unchecked(&vcc->stats->rx_err);
28163 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28164 NS_PRV_IOVCNT(iovb));
28165 vc->rx_iov = NULL;
28166@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28167 printk(" - PDU size mismatch.\n");
28168 else
28169 printk(".\n");
28170- atomic_inc(&vcc->stats->rx_err);
28171+ atomic_inc_unchecked(&vcc->stats->rx_err);
28172 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28173 NS_PRV_IOVCNT(iovb));
28174 vc->rx_iov = NULL;
28175@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28176 /* skb points to a small buffer */
28177 if (!atm_charge(vcc, skb->truesize)) {
28178 push_rxbufs(card, skb);
28179- atomic_inc(&vcc->stats->rx_drop);
28180+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28181 } else {
28182 skb_put(skb, len);
28183 dequeue_sm_buf(card, skb);
28184@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28185 ATM_SKB(skb)->vcc = vcc;
28186 __net_timestamp(skb);
28187 vcc->push(vcc, skb);
28188- atomic_inc(&vcc->stats->rx);
28189+ atomic_inc_unchecked(&vcc->stats->rx);
28190 }
28191 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28192 struct sk_buff *sb;
28193@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28194 if (len <= NS_SMBUFSIZE) {
28195 if (!atm_charge(vcc, sb->truesize)) {
28196 push_rxbufs(card, sb);
28197- atomic_inc(&vcc->stats->rx_drop);
28198+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28199 } else {
28200 skb_put(sb, len);
28201 dequeue_sm_buf(card, sb);
28202@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28203 ATM_SKB(sb)->vcc = vcc;
28204 __net_timestamp(sb);
28205 vcc->push(vcc, sb);
28206- atomic_inc(&vcc->stats->rx);
28207+ atomic_inc_unchecked(&vcc->stats->rx);
28208 }
28209
28210 push_rxbufs(card, skb);
28211@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28212
28213 if (!atm_charge(vcc, skb->truesize)) {
28214 push_rxbufs(card, skb);
28215- atomic_inc(&vcc->stats->rx_drop);
28216+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28217 } else {
28218 dequeue_lg_buf(card, skb);
28219 #ifdef NS_USE_DESTRUCTORS
28220@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28221 ATM_SKB(skb)->vcc = vcc;
28222 __net_timestamp(skb);
28223 vcc->push(vcc, skb);
28224- atomic_inc(&vcc->stats->rx);
28225+ atomic_inc_unchecked(&vcc->stats->rx);
28226 }
28227
28228 push_rxbufs(card, sb);
28229@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28230 printk
28231 ("nicstar%d: Out of huge buffers.\n",
28232 card->index);
28233- atomic_inc(&vcc->stats->rx_drop);
28234+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28235 recycle_iovec_rx_bufs(card,
28236 (struct iovec *)
28237 iovb->data,
28238@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28239 card->hbpool.count++;
28240 } else
28241 dev_kfree_skb_any(hb);
28242- atomic_inc(&vcc->stats->rx_drop);
28243+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28244 } else {
28245 /* Copy the small buffer to the huge buffer */
28246 sb = (struct sk_buff *)iov->iov_base;
28247@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28248 #endif /* NS_USE_DESTRUCTORS */
28249 __net_timestamp(hb);
28250 vcc->push(vcc, hb);
28251- atomic_inc(&vcc->stats->rx);
28252+ atomic_inc_unchecked(&vcc->stats->rx);
28253 }
28254 }
28255
28256diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28257index e8cd652..bbbd1fc 100644
28258--- a/drivers/atm/solos-pci.c
28259+++ b/drivers/atm/solos-pci.c
28260@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28261 }
28262 atm_charge(vcc, skb->truesize);
28263 vcc->push(vcc, skb);
28264- atomic_inc(&vcc->stats->rx);
28265+ atomic_inc_unchecked(&vcc->stats->rx);
28266 break;
28267
28268 case PKT_STATUS:
28269@@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28270 vcc = SKB_CB(oldskb)->vcc;
28271
28272 if (vcc) {
28273- atomic_inc(&vcc->stats->tx);
28274+ atomic_inc_unchecked(&vcc->stats->tx);
28275 solos_pop(vcc, oldskb);
28276 } else
28277 dev_kfree_skb_irq(oldskb);
28278diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28279index 90f1ccc..04c4a1e 100644
28280--- a/drivers/atm/suni.c
28281+++ b/drivers/atm/suni.c
28282@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28283
28284
28285 #define ADD_LIMITED(s,v) \
28286- atomic_add((v),&stats->s); \
28287- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28288+ atomic_add_unchecked((v),&stats->s); \
28289+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28290
28291
28292 static void suni_hz(unsigned long from_timer)
28293diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28294index 5120a96..e2572bd 100644
28295--- a/drivers/atm/uPD98402.c
28296+++ b/drivers/atm/uPD98402.c
28297@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28298 struct sonet_stats tmp;
28299 int error = 0;
28300
28301- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28302+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28303 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28304 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28305 if (zero && !error) {
28306@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28307
28308
28309 #define ADD_LIMITED(s,v) \
28310- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28311- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28312- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28313+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28314+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28315+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28316
28317
28318 static void stat_event(struct atm_dev *dev)
28319@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28320 if (reason & uPD98402_INT_PFM) stat_event(dev);
28321 if (reason & uPD98402_INT_PCO) {
28322 (void) GET(PCOCR); /* clear interrupt cause */
28323- atomic_add(GET(HECCT),
28324+ atomic_add_unchecked(GET(HECCT),
28325 &PRIV(dev)->sonet_stats.uncorr_hcs);
28326 }
28327 if ((reason & uPD98402_INT_RFO) &&
28328@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28329 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28330 uPD98402_INT_LOS),PIMR); /* enable them */
28331 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28332- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28333- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28334- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28335+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28336+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28337+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28338 return 0;
28339 }
28340
28341diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28342index d889f56..17eb71e 100644
28343--- a/drivers/atm/zatm.c
28344+++ b/drivers/atm/zatm.c
28345@@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28346 }
28347 if (!size) {
28348 dev_kfree_skb_irq(skb);
28349- if (vcc) atomic_inc(&vcc->stats->rx_err);
28350+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28351 continue;
28352 }
28353 if (!atm_charge(vcc,skb->truesize)) {
28354@@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28355 skb->len = size;
28356 ATM_SKB(skb)->vcc = vcc;
28357 vcc->push(vcc,skb);
28358- atomic_inc(&vcc->stats->rx);
28359+ atomic_inc_unchecked(&vcc->stats->rx);
28360 }
28361 zout(pos & 0xffff,MTA(mbx));
28362 #if 0 /* probably a stupid idea */
28363@@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28364 skb_queue_head(&zatm_vcc->backlog,skb);
28365 break;
28366 }
28367- atomic_inc(&vcc->stats->tx);
28368+ atomic_inc_unchecked(&vcc->stats->tx);
28369 wake_up(&zatm_vcc->tx_wait);
28370 }
28371
28372diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28373index 8493536..31adee0 100644
28374--- a/drivers/base/devtmpfs.c
28375+++ b/drivers/base/devtmpfs.c
28376@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28377 if (!thread)
28378 return 0;
28379
28380- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28381+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28382 if (err)
28383 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28384 else
28385diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28386index caf995f..6f76697 100644
28387--- a/drivers/base/power/wakeup.c
28388+++ b/drivers/base/power/wakeup.c
28389@@ -30,14 +30,14 @@ bool events_check_enabled;
28390 * They need to be modified together atomically, so it's better to use one
28391 * atomic variable to hold them both.
28392 */
28393-static atomic_t combined_event_count = ATOMIC_INIT(0);
28394+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28395
28396 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28397 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28398
28399 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28400 {
28401- unsigned int comb = atomic_read(&combined_event_count);
28402+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
28403
28404 *cnt = (comb >> IN_PROGRESS_BITS);
28405 *inpr = comb & MAX_IN_PROGRESS;
28406@@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28407 ws->last_time = ktime_get();
28408
28409 /* Increment the counter of events in progress. */
28410- atomic_inc(&combined_event_count);
28411+ atomic_inc_unchecked(&combined_event_count);
28412 }
28413
28414 /**
28415@@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28416 * Increment the counter of registered wakeup events and decrement the
28417 * couter of wakeup events in progress simultaneously.
28418 */
28419- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
28420+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28421 }
28422
28423 /**
28424diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28425index b0f553b..77b928b 100644
28426--- a/drivers/block/cciss.c
28427+++ b/drivers/block/cciss.c
28428@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28429 int err;
28430 u32 cp;
28431
28432+ memset(&arg64, 0, sizeof(arg64));
28433+
28434 err = 0;
28435 err |=
28436 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28437@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28438 while (!list_empty(&h->reqQ)) {
28439 c = list_entry(h->reqQ.next, CommandList_struct, list);
28440 /* can't do anything if fifo is full */
28441- if ((h->access.fifo_full(h))) {
28442+ if ((h->access->fifo_full(h))) {
28443 dev_warn(&h->pdev->dev, "fifo full\n");
28444 break;
28445 }
28446@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28447 h->Qdepth--;
28448
28449 /* Tell the controller execute command */
28450- h->access.submit_command(h, c);
28451+ h->access->submit_command(h, c);
28452
28453 /* Put job onto the completed Q */
28454 addQ(&h->cmpQ, c);
28455@@ -3443,17 +3445,17 @@ startio:
28456
28457 static inline unsigned long get_next_completion(ctlr_info_t *h)
28458 {
28459- return h->access.command_completed(h);
28460+ return h->access->command_completed(h);
28461 }
28462
28463 static inline int interrupt_pending(ctlr_info_t *h)
28464 {
28465- return h->access.intr_pending(h);
28466+ return h->access->intr_pending(h);
28467 }
28468
28469 static inline long interrupt_not_for_us(ctlr_info_t *h)
28470 {
28471- return ((h->access.intr_pending(h) == 0) ||
28472+ return ((h->access->intr_pending(h) == 0) ||
28473 (h->interrupts_enabled == 0));
28474 }
28475
28476@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
28477 u32 a;
28478
28479 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
28480- return h->access.command_completed(h);
28481+ return h->access->command_completed(h);
28482
28483 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
28484 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
28485@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
28486 trans_support & CFGTBL_Trans_use_short_tags);
28487
28488 /* Change the access methods to the performant access methods */
28489- h->access = SA5_performant_access;
28490+ h->access = &SA5_performant_access;
28491 h->transMethod = CFGTBL_Trans_Performant;
28492
28493 return;
28494@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
28495 if (prod_index < 0)
28496 return -ENODEV;
28497 h->product_name = products[prod_index].product_name;
28498- h->access = *(products[prod_index].access);
28499+ h->access = products[prod_index].access;
28500
28501 if (cciss_board_disabled(h)) {
28502 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
28503@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
28504 }
28505
28506 /* make sure the board interrupts are off */
28507- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28508+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28509 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
28510 if (rc)
28511 goto clean2;
28512@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
28513 * fake ones to scoop up any residual completions.
28514 */
28515 spin_lock_irqsave(&h->lock, flags);
28516- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28517+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28518 spin_unlock_irqrestore(&h->lock, flags);
28519 free_irq(h->intr[h->intr_mode], h);
28520 rc = cciss_request_irq(h, cciss_msix_discard_completions,
28521@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
28522 dev_info(&h->pdev->dev, "Board READY.\n");
28523 dev_info(&h->pdev->dev,
28524 "Waiting for stale completions to drain.\n");
28525- h->access.set_intr_mask(h, CCISS_INTR_ON);
28526+ h->access->set_intr_mask(h, CCISS_INTR_ON);
28527 msleep(10000);
28528- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28529+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28530
28531 rc = controller_reset_failed(h->cfgtable);
28532 if (rc)
28533@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
28534 cciss_scsi_setup(h);
28535
28536 /* Turn the interrupts on so we can service requests */
28537- h->access.set_intr_mask(h, CCISS_INTR_ON);
28538+ h->access->set_intr_mask(h, CCISS_INTR_ON);
28539
28540 /* Get the firmware version */
28541 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
28542@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
28543 kfree(flush_buf);
28544 if (return_code != IO_OK)
28545 dev_warn(&h->pdev->dev, "Error flushing cache\n");
28546- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28547+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28548 free_irq(h->intr[h->intr_mode], h);
28549 }
28550
28551diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
28552index 7fda30e..eb5dfe0 100644
28553--- a/drivers/block/cciss.h
28554+++ b/drivers/block/cciss.h
28555@@ -101,7 +101,7 @@ struct ctlr_info
28556 /* information about each logical volume */
28557 drive_info_struct *drv[CISS_MAX_LUN];
28558
28559- struct access_method access;
28560+ struct access_method *access;
28561
28562 /* queue and queue Info */
28563 struct list_head reqQ;
28564diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
28565index 9125bbe..eede5c8 100644
28566--- a/drivers/block/cpqarray.c
28567+++ b/drivers/block/cpqarray.c
28568@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28569 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
28570 goto Enomem4;
28571 }
28572- hba[i]->access.set_intr_mask(hba[i], 0);
28573+ hba[i]->access->set_intr_mask(hba[i], 0);
28574 if (request_irq(hba[i]->intr, do_ida_intr,
28575 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
28576 {
28577@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28578 add_timer(&hba[i]->timer);
28579
28580 /* Enable IRQ now that spinlock and rate limit timer are set up */
28581- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28582+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28583
28584 for(j=0; j<NWD; j++) {
28585 struct gendisk *disk = ida_gendisk[i][j];
28586@@ -694,7 +694,7 @@ DBGINFO(
28587 for(i=0; i<NR_PRODUCTS; i++) {
28588 if (board_id == products[i].board_id) {
28589 c->product_name = products[i].product_name;
28590- c->access = *(products[i].access);
28591+ c->access = products[i].access;
28592 break;
28593 }
28594 }
28595@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
28596 hba[ctlr]->intr = intr;
28597 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
28598 hba[ctlr]->product_name = products[j].product_name;
28599- hba[ctlr]->access = *(products[j].access);
28600+ hba[ctlr]->access = products[j].access;
28601 hba[ctlr]->ctlr = ctlr;
28602 hba[ctlr]->board_id = board_id;
28603 hba[ctlr]->pci_dev = NULL; /* not PCI */
28604@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
28605
28606 while((c = h->reqQ) != NULL) {
28607 /* Can't do anything if we're busy */
28608- if (h->access.fifo_full(h) == 0)
28609+ if (h->access->fifo_full(h) == 0)
28610 return;
28611
28612 /* Get the first entry from the request Q */
28613@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
28614 h->Qdepth--;
28615
28616 /* Tell the controller to do our bidding */
28617- h->access.submit_command(h, c);
28618+ h->access->submit_command(h, c);
28619
28620 /* Get onto the completion Q */
28621 addQ(&h->cmpQ, c);
28622@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28623 unsigned long flags;
28624 __u32 a,a1;
28625
28626- istat = h->access.intr_pending(h);
28627+ istat = h->access->intr_pending(h);
28628 /* Is this interrupt for us? */
28629 if (istat == 0)
28630 return IRQ_NONE;
28631@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28632 */
28633 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
28634 if (istat & FIFO_NOT_EMPTY) {
28635- while((a = h->access.command_completed(h))) {
28636+ while((a = h->access->command_completed(h))) {
28637 a1 = a; a &= ~3;
28638 if ((c = h->cmpQ) == NULL)
28639 {
28640@@ -1449,11 +1449,11 @@ static int sendcmd(
28641 /*
28642 * Disable interrupt
28643 */
28644- info_p->access.set_intr_mask(info_p, 0);
28645+ info_p->access->set_intr_mask(info_p, 0);
28646 /* Make sure there is room in the command FIFO */
28647 /* Actually it should be completely empty at this time. */
28648 for (i = 200000; i > 0; i--) {
28649- temp = info_p->access.fifo_full(info_p);
28650+ temp = info_p->access->fifo_full(info_p);
28651 if (temp != 0) {
28652 break;
28653 }
28654@@ -1466,7 +1466,7 @@ DBG(
28655 /*
28656 * Send the cmd
28657 */
28658- info_p->access.submit_command(info_p, c);
28659+ info_p->access->submit_command(info_p, c);
28660 complete = pollcomplete(ctlr);
28661
28662 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
28663@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
28664 * we check the new geometry. Then turn interrupts back on when
28665 * we're done.
28666 */
28667- host->access.set_intr_mask(host, 0);
28668+ host->access->set_intr_mask(host, 0);
28669 getgeometry(ctlr);
28670- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
28671+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
28672
28673 for(i=0; i<NWD; i++) {
28674 struct gendisk *disk = ida_gendisk[ctlr][i];
28675@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
28676 /* Wait (up to 2 seconds) for a command to complete */
28677
28678 for (i = 200000; i > 0; i--) {
28679- done = hba[ctlr]->access.command_completed(hba[ctlr]);
28680+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
28681 if (done == 0) {
28682 udelay(10); /* a short fixed delay */
28683 } else
28684diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
28685index be73e9d..7fbf140 100644
28686--- a/drivers/block/cpqarray.h
28687+++ b/drivers/block/cpqarray.h
28688@@ -99,7 +99,7 @@ struct ctlr_info {
28689 drv_info_t drv[NWD];
28690 struct proc_dir_entry *proc;
28691
28692- struct access_method access;
28693+ struct access_method *access;
28694
28695 cmdlist_t *reqQ;
28696 cmdlist_t *cmpQ;
28697diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
28698index 8d68056..e67050f 100644
28699--- a/drivers/block/drbd/drbd_int.h
28700+++ b/drivers/block/drbd/drbd_int.h
28701@@ -736,7 +736,7 @@ struct drbd_request;
28702 struct drbd_epoch {
28703 struct list_head list;
28704 unsigned int barrier_nr;
28705- atomic_t epoch_size; /* increased on every request added. */
28706+ atomic_unchecked_t epoch_size; /* increased on every request added. */
28707 atomic_t active; /* increased on every req. added, and dec on every finished. */
28708 unsigned long flags;
28709 };
28710@@ -1108,7 +1108,7 @@ struct drbd_conf {
28711 void *int_dig_in;
28712 void *int_dig_vv;
28713 wait_queue_head_t seq_wait;
28714- atomic_t packet_seq;
28715+ atomic_unchecked_t packet_seq;
28716 unsigned int peer_seq;
28717 spinlock_t peer_seq_lock;
28718 unsigned int minor;
28719@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
28720
28721 static inline void drbd_tcp_cork(struct socket *sock)
28722 {
28723- int __user val = 1;
28724+ int val = 1;
28725 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
28726- (char __user *)&val, sizeof(val));
28727+ (char __force_user *)&val, sizeof(val));
28728 }
28729
28730 static inline void drbd_tcp_uncork(struct socket *sock)
28731 {
28732- int __user val = 0;
28733+ int val = 0;
28734 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
28735- (char __user *)&val, sizeof(val));
28736+ (char __force_user *)&val, sizeof(val));
28737 }
28738
28739 static inline void drbd_tcp_nodelay(struct socket *sock)
28740 {
28741- int __user val = 1;
28742+ int val = 1;
28743 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
28744- (char __user *)&val, sizeof(val));
28745+ (char __force_user *)&val, sizeof(val));
28746 }
28747
28748 static inline void drbd_tcp_quickack(struct socket *sock)
28749 {
28750- int __user val = 2;
28751+ int val = 2;
28752 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
28753- (char __user *)&val, sizeof(val));
28754+ (char __force_user *)&val, sizeof(val));
28755 }
28756
28757 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
28758diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
28759index 211fc44..c5116f1 100644
28760--- a/drivers/block/drbd/drbd_main.c
28761+++ b/drivers/block/drbd/drbd_main.c
28762@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
28763 p.sector = sector;
28764 p.block_id = block_id;
28765 p.blksize = blksize;
28766- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
28767+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
28768
28769 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
28770 return false;
28771@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
28772 p.sector = cpu_to_be64(req->sector);
28773 p.block_id = (unsigned long)req;
28774 p.seq_num = cpu_to_be32(req->seq_num =
28775- atomic_add_return(1, &mdev->packet_seq));
28776+ atomic_add_return_unchecked(1, &mdev->packet_seq));
28777
28778 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
28779
28780@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
28781 atomic_set(&mdev->unacked_cnt, 0);
28782 atomic_set(&mdev->local_cnt, 0);
28783 atomic_set(&mdev->net_cnt, 0);
28784- atomic_set(&mdev->packet_seq, 0);
28785+ atomic_set_unchecked(&mdev->packet_seq, 0);
28786 atomic_set(&mdev->pp_in_use, 0);
28787 atomic_set(&mdev->pp_in_use_by_net, 0);
28788 atomic_set(&mdev->rs_sect_in, 0);
28789@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
28790 mdev->receiver.t_state);
28791
28792 /* no need to lock it, I'm the only thread alive */
28793- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
28794- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
28795+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
28796+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
28797 mdev->al_writ_cnt =
28798 mdev->bm_writ_cnt =
28799 mdev->read_cnt =
28800diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
28801index af2a250..219c74b 100644
28802--- a/drivers/block/drbd/drbd_nl.c
28803+++ b/drivers/block/drbd/drbd_nl.c
28804@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
28805 module_put(THIS_MODULE);
28806 }
28807
28808-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
28809+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
28810
28811 static unsigned short *
28812 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
28813@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
28814 cn_reply->id.idx = CN_IDX_DRBD;
28815 cn_reply->id.val = CN_VAL_DRBD;
28816
28817- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28818+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28819 cn_reply->ack = 0; /* not used here. */
28820 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28821 (int)((char *)tl - (char *)reply->tag_list);
28822@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
28823 cn_reply->id.idx = CN_IDX_DRBD;
28824 cn_reply->id.val = CN_VAL_DRBD;
28825
28826- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28827+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28828 cn_reply->ack = 0; /* not used here. */
28829 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28830 (int)((char *)tl - (char *)reply->tag_list);
28831@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
28832 cn_reply->id.idx = CN_IDX_DRBD;
28833 cn_reply->id.val = CN_VAL_DRBD;
28834
28835- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
28836+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
28837 cn_reply->ack = 0; // not used here.
28838 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28839 (int)((char*)tl - (char*)reply->tag_list);
28840@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
28841 cn_reply->id.idx = CN_IDX_DRBD;
28842 cn_reply->id.val = CN_VAL_DRBD;
28843
28844- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
28845+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
28846 cn_reply->ack = 0; /* not used here. */
28847 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
28848 (int)((char *)tl - (char *)reply->tag_list);
28849diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
28850index 43beaca..4a5b1dd 100644
28851--- a/drivers/block/drbd/drbd_receiver.c
28852+++ b/drivers/block/drbd/drbd_receiver.c
28853@@ -894,7 +894,7 @@ retry:
28854 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
28855 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
28856
28857- atomic_set(&mdev->packet_seq, 0);
28858+ atomic_set_unchecked(&mdev->packet_seq, 0);
28859 mdev->peer_seq = 0;
28860
28861 drbd_thread_start(&mdev->asender);
28862@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
28863 do {
28864 next_epoch = NULL;
28865
28866- epoch_size = atomic_read(&epoch->epoch_size);
28867+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
28868
28869 switch (ev & ~EV_CLEANUP) {
28870 case EV_PUT:
28871@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
28872 rv = FE_DESTROYED;
28873 } else {
28874 epoch->flags = 0;
28875- atomic_set(&epoch->epoch_size, 0);
28876+ atomic_set_unchecked(&epoch->epoch_size, 0);
28877 /* atomic_set(&epoch->active, 0); is already zero */
28878 if (rv == FE_STILL_LIVE)
28879 rv = FE_RECYCLED;
28880@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
28881 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
28882 drbd_flush(mdev);
28883
28884- if (atomic_read(&mdev->current_epoch->epoch_size)) {
28885+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
28886 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
28887 if (epoch)
28888 break;
28889 }
28890
28891 epoch = mdev->current_epoch;
28892- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
28893+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
28894
28895 D_ASSERT(atomic_read(&epoch->active) == 0);
28896 D_ASSERT(epoch->flags == 0);
28897@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
28898 }
28899
28900 epoch->flags = 0;
28901- atomic_set(&epoch->epoch_size, 0);
28902+ atomic_set_unchecked(&epoch->epoch_size, 0);
28903 atomic_set(&epoch->active, 0);
28904
28905 spin_lock(&mdev->epoch_lock);
28906- if (atomic_read(&mdev->current_epoch->epoch_size)) {
28907+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
28908 list_add(&epoch->list, &mdev->current_epoch->list);
28909 mdev->current_epoch = epoch;
28910 mdev->epochs++;
28911@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
28912 spin_unlock(&mdev->peer_seq_lock);
28913
28914 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
28915- atomic_inc(&mdev->current_epoch->epoch_size);
28916+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
28917 return drbd_drain_block(mdev, data_size);
28918 }
28919
28920@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
28921
28922 spin_lock(&mdev->epoch_lock);
28923 e->epoch = mdev->current_epoch;
28924- atomic_inc(&e->epoch->epoch_size);
28925+ atomic_inc_unchecked(&e->epoch->epoch_size);
28926 atomic_inc(&e->epoch->active);
28927 spin_unlock(&mdev->epoch_lock);
28928
28929@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
28930 D_ASSERT(list_empty(&mdev->done_ee));
28931
28932 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
28933- atomic_set(&mdev->current_epoch->epoch_size, 0);
28934+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
28935 D_ASSERT(list_empty(&mdev->current_epoch->list));
28936 }
28937
28938diff --git a/drivers/block/loop.c b/drivers/block/loop.c
28939index cd50435..ba1ffb5 100644
28940--- a/drivers/block/loop.c
28941+++ b/drivers/block/loop.c
28942@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
28943 mm_segment_t old_fs = get_fs();
28944
28945 set_fs(get_ds());
28946- bw = file->f_op->write(file, buf, len, &pos);
28947+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
28948 set_fs(old_fs);
28949 if (likely(bw == len))
28950 return 0;
28951diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
28952index 4364303..9adf4ee 100644
28953--- a/drivers/char/Kconfig
28954+++ b/drivers/char/Kconfig
28955@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
28956
28957 config DEVKMEM
28958 bool "/dev/kmem virtual device support"
28959- default y
28960+ default n
28961+ depends on !GRKERNSEC_KMEM
28962 help
28963 Say Y here if you want to support the /dev/kmem device. The
28964 /dev/kmem device is rarely used, but can be used for certain
28965@@ -596,6 +597,7 @@ config DEVPORT
28966 bool
28967 depends on !M68K
28968 depends on ISA || PCI
28969+ depends on !GRKERNSEC_KMEM
28970 default y
28971
28972 source "drivers/s390/char/Kconfig"
28973diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
28974index 2e04433..22afc64 100644
28975--- a/drivers/char/agp/frontend.c
28976+++ b/drivers/char/agp/frontend.c
28977@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
28978 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
28979 return -EFAULT;
28980
28981- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
28982+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
28983 return -EFAULT;
28984
28985 client = agp_find_client_by_pid(reserve.pid);
28986diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
28987index 095ab90..afad0a4 100644
28988--- a/drivers/char/briq_panel.c
28989+++ b/drivers/char/briq_panel.c
28990@@ -9,6 +9,7 @@
28991 #include <linux/types.h>
28992 #include <linux/errno.h>
28993 #include <linux/tty.h>
28994+#include <linux/mutex.h>
28995 #include <linux/timer.h>
28996 #include <linux/kernel.h>
28997 #include <linux/wait.h>
28998@@ -34,6 +35,7 @@ static int vfd_is_open;
28999 static unsigned char vfd[40];
29000 static int vfd_cursor;
29001 static unsigned char ledpb, led;
29002+static DEFINE_MUTEX(vfd_mutex);
29003
29004 static void update_vfd(void)
29005 {
29006@@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
29007 if (!vfd_is_open)
29008 return -EBUSY;
29009
29010+ mutex_lock(&vfd_mutex);
29011 for (;;) {
29012 char c;
29013 if (!indx)
29014 break;
29015- if (get_user(c, buf))
29016+ if (get_user(c, buf)) {
29017+ mutex_unlock(&vfd_mutex);
29018 return -EFAULT;
29019+ }
29020 if (esc) {
29021 set_led(c);
29022 esc = 0;
29023@@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
29024 buf++;
29025 }
29026 update_vfd();
29027+ mutex_unlock(&vfd_mutex);
29028
29029 return len;
29030 }
29031diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29032index f773a9d..65cd683 100644
29033--- a/drivers/char/genrtc.c
29034+++ b/drivers/char/genrtc.c
29035@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
29036 switch (cmd) {
29037
29038 case RTC_PLL_GET:
29039+ memset(&pll, 0, sizeof(pll));
29040 if (get_rtc_pll(&pll))
29041 return -EINVAL;
29042 else
29043diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29044index 0833896..cccce52 100644
29045--- a/drivers/char/hpet.c
29046+++ b/drivers/char/hpet.c
29047@@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29048 }
29049
29050 static int
29051-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29052+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29053 struct hpet_info *info)
29054 {
29055 struct hpet_timer __iomem *timer;
29056diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29057index 58c0e63..46c16bf 100644
29058--- a/drivers/char/ipmi/ipmi_msghandler.c
29059+++ b/drivers/char/ipmi/ipmi_msghandler.c
29060@@ -415,7 +415,7 @@ struct ipmi_smi {
29061 struct proc_dir_entry *proc_dir;
29062 char proc_dir_name[10];
29063
29064- atomic_t stats[IPMI_NUM_STATS];
29065+ atomic_unchecked_t stats[IPMI_NUM_STATS];
29066
29067 /*
29068 * run_to_completion duplicate of smb_info, smi_info
29069@@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29070
29071
29072 #define ipmi_inc_stat(intf, stat) \
29073- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29074+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29075 #define ipmi_get_stat(intf, stat) \
29076- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29077+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29078
29079 static int is_lan_addr(struct ipmi_addr *addr)
29080 {
29081@@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29082 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29083 init_waitqueue_head(&intf->waitq);
29084 for (i = 0; i < IPMI_NUM_STATS; i++)
29085- atomic_set(&intf->stats[i], 0);
29086+ atomic_set_unchecked(&intf->stats[i], 0);
29087
29088 intf->proc_dir = NULL;
29089
29090diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29091index 50fcf9c..91b5528 100644
29092--- a/drivers/char/ipmi/ipmi_si_intf.c
29093+++ b/drivers/char/ipmi/ipmi_si_intf.c
29094@@ -277,7 +277,7 @@ struct smi_info {
29095 unsigned char slave_addr;
29096
29097 /* Counters and things for the proc filesystem. */
29098- atomic_t stats[SI_NUM_STATS];
29099+ atomic_unchecked_t stats[SI_NUM_STATS];
29100
29101 struct task_struct *thread;
29102
29103@@ -286,9 +286,9 @@ struct smi_info {
29104 };
29105
29106 #define smi_inc_stat(smi, stat) \
29107- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29108+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29109 #define smi_get_stat(smi, stat) \
29110- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29111+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29112
29113 #define SI_MAX_PARMS 4
29114
29115@@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
29116 atomic_set(&new_smi->req_events, 0);
29117 new_smi->run_to_completion = 0;
29118 for (i = 0; i < SI_NUM_STATS; i++)
29119- atomic_set(&new_smi->stats[i], 0);
29120+ atomic_set_unchecked(&new_smi->stats[i], 0);
29121
29122 new_smi->interrupt_disabled = 1;
29123 atomic_set(&new_smi->stop_operation, 0);
29124diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29125index 1aeaaba..e018570 100644
29126--- a/drivers/char/mbcs.c
29127+++ b/drivers/char/mbcs.c
29128@@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
29129 return 0;
29130 }
29131
29132-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29133+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29134 {
29135 .part_num = MBCS_PART_NUM,
29136 .mfg_num = MBCS_MFG_NUM,
29137diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29138index d6e9d08..4493e89 100644
29139--- a/drivers/char/mem.c
29140+++ b/drivers/char/mem.c
29141@@ -18,6 +18,7 @@
29142 #include <linux/raw.h>
29143 #include <linux/tty.h>
29144 #include <linux/capability.h>
29145+#include <linux/security.h>
29146 #include <linux/ptrace.h>
29147 #include <linux/device.h>
29148 #include <linux/highmem.h>
29149@@ -35,6 +36,10 @@
29150 # include <linux/efi.h>
29151 #endif
29152
29153+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29154+extern const struct file_operations grsec_fops;
29155+#endif
29156+
29157 static inline unsigned long size_inside_page(unsigned long start,
29158 unsigned long size)
29159 {
29160@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29161
29162 while (cursor < to) {
29163 if (!devmem_is_allowed(pfn)) {
29164+#ifdef CONFIG_GRKERNSEC_KMEM
29165+ gr_handle_mem_readwrite(from, to);
29166+#else
29167 printk(KERN_INFO
29168 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29169 current->comm, from, to);
29170+#endif
29171 return 0;
29172 }
29173 cursor += PAGE_SIZE;
29174@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29175 }
29176 return 1;
29177 }
29178+#elif defined(CONFIG_GRKERNSEC_KMEM)
29179+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29180+{
29181+ return 0;
29182+}
29183 #else
29184 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29185 {
29186@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29187
29188 while (count > 0) {
29189 unsigned long remaining;
29190+ char *temp;
29191
29192 sz = size_inside_page(p, count);
29193
29194@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29195 if (!ptr)
29196 return -EFAULT;
29197
29198- remaining = copy_to_user(buf, ptr, sz);
29199+#ifdef CONFIG_PAX_USERCOPY
29200+ temp = kmalloc(sz, GFP_KERNEL);
29201+ if (!temp) {
29202+ unxlate_dev_mem_ptr(p, ptr);
29203+ return -ENOMEM;
29204+ }
29205+ memcpy(temp, ptr, sz);
29206+#else
29207+ temp = ptr;
29208+#endif
29209+
29210+ remaining = copy_to_user(buf, temp, sz);
29211+
29212+#ifdef CONFIG_PAX_USERCOPY
29213+ kfree(temp);
29214+#endif
29215+
29216 unxlate_dev_mem_ptr(p, ptr);
29217 if (remaining)
29218 return -EFAULT;
29219@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29220 size_t count, loff_t *ppos)
29221 {
29222 unsigned long p = *ppos;
29223- ssize_t low_count, read, sz;
29224+ ssize_t low_count, read, sz, err = 0;
29225 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29226- int err = 0;
29227
29228 read = 0;
29229 if (p < (unsigned long) high_memory) {
29230@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29231 }
29232 #endif
29233 while (low_count > 0) {
29234+ char *temp;
29235+
29236 sz = size_inside_page(p, low_count);
29237
29238 /*
29239@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29240 */
29241 kbuf = xlate_dev_kmem_ptr((char *)p);
29242
29243- if (copy_to_user(buf, kbuf, sz))
29244+#ifdef CONFIG_PAX_USERCOPY
29245+ temp = kmalloc(sz, GFP_KERNEL);
29246+ if (!temp)
29247+ return -ENOMEM;
29248+ memcpy(temp, kbuf, sz);
29249+#else
29250+ temp = kbuf;
29251+#endif
29252+
29253+ err = copy_to_user(buf, temp, sz);
29254+
29255+#ifdef CONFIG_PAX_USERCOPY
29256+ kfree(temp);
29257+#endif
29258+
29259+ if (err)
29260 return -EFAULT;
29261 buf += sz;
29262 p += sz;
29263@@ -867,6 +914,9 @@ static const struct memdev {
29264 #ifdef CONFIG_CRASH_DUMP
29265 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29266 #endif
29267+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29268+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29269+#endif
29270 };
29271
29272 static int memory_open(struct inode *inode, struct file *filp)
29273diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29274index da3cfee..a5a6606 100644
29275--- a/drivers/char/nvram.c
29276+++ b/drivers/char/nvram.c
29277@@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29278
29279 spin_unlock_irq(&rtc_lock);
29280
29281- if (copy_to_user(buf, contents, tmp - contents))
29282+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29283 return -EFAULT;
29284
29285 *ppos = i;
29286diff --git a/drivers/char/random.c b/drivers/char/random.c
29287index 54ca8b2..d58cb51 100644
29288--- a/drivers/char/random.c
29289+++ b/drivers/char/random.c
29290@@ -261,8 +261,13 @@
29291 /*
29292 * Configuration information
29293 */
29294+#ifdef CONFIG_GRKERNSEC_RANDNET
29295+#define INPUT_POOL_WORDS 512
29296+#define OUTPUT_POOL_WORDS 128
29297+#else
29298 #define INPUT_POOL_WORDS 128
29299 #define OUTPUT_POOL_WORDS 32
29300+#endif
29301 #define SEC_XFER_SIZE 512
29302 #define EXTRACT_SIZE 10
29303
29304@@ -300,10 +305,17 @@ static struct poolinfo {
29305 int poolwords;
29306 int tap1, tap2, tap3, tap4, tap5;
29307 } poolinfo_table[] = {
29308+#ifdef CONFIG_GRKERNSEC_RANDNET
29309+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29310+ { 512, 411, 308, 208, 104, 1 },
29311+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29312+ { 128, 103, 76, 51, 25, 1 },
29313+#else
29314 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29315 { 128, 103, 76, 51, 25, 1 },
29316 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29317 { 32, 26, 20, 14, 7, 1 },
29318+#endif
29319 #if 0
29320 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29321 { 2048, 1638, 1231, 819, 411, 1 },
29322@@ -913,7 +925,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29323
29324 extract_buf(r, tmp);
29325 i = min_t(int, nbytes, EXTRACT_SIZE);
29326- if (copy_to_user(buf, tmp, i)) {
29327+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29328 ret = -EFAULT;
29329 break;
29330 }
29331@@ -1238,7 +1250,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29332 #include <linux/sysctl.h>
29333
29334 static int min_read_thresh = 8, min_write_thresh;
29335-static int max_read_thresh = INPUT_POOL_WORDS * 32;
29336+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29337 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29338 static char sysctl_bootid[16];
29339
29340diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29341index 1ee8ce7..b778bef 100644
29342--- a/drivers/char/sonypi.c
29343+++ b/drivers/char/sonypi.c
29344@@ -55,6 +55,7 @@
29345 #include <asm/uaccess.h>
29346 #include <asm/io.h>
29347 #include <asm/system.h>
29348+#include <asm/local.h>
29349
29350 #include <linux/sonypi.h>
29351
29352@@ -491,7 +492,7 @@ static struct sonypi_device {
29353 spinlock_t fifo_lock;
29354 wait_queue_head_t fifo_proc_list;
29355 struct fasync_struct *fifo_async;
29356- int open_count;
29357+ local_t open_count;
29358 int model;
29359 struct input_dev *input_jog_dev;
29360 struct input_dev *input_key_dev;
29361@@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29362 static int sonypi_misc_release(struct inode *inode, struct file *file)
29363 {
29364 mutex_lock(&sonypi_device.lock);
29365- sonypi_device.open_count--;
29366+ local_dec(&sonypi_device.open_count);
29367 mutex_unlock(&sonypi_device.lock);
29368 return 0;
29369 }
29370@@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29371 {
29372 mutex_lock(&sonypi_device.lock);
29373 /* Flush input queue on first open */
29374- if (!sonypi_device.open_count)
29375+ if (!local_read(&sonypi_device.open_count))
29376 kfifo_reset(&sonypi_device.fifo);
29377- sonypi_device.open_count++;
29378+ local_inc(&sonypi_device.open_count);
29379 mutex_unlock(&sonypi_device.lock);
29380
29381 return 0;
29382diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29383index ad7c732..5aa8054 100644
29384--- a/drivers/char/tpm/tpm.c
29385+++ b/drivers/char/tpm/tpm.c
29386@@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29387 chip->vendor.req_complete_val)
29388 goto out_recv;
29389
29390- if ((status == chip->vendor.req_canceled)) {
29391+ if (status == chip->vendor.req_canceled) {
29392 dev_err(chip->dev, "Operation Canceled\n");
29393 rc = -ECANCELED;
29394 goto out;
29395diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29396index 0636520..169c1d0 100644
29397--- a/drivers/char/tpm/tpm_bios.c
29398+++ b/drivers/char/tpm/tpm_bios.c
29399@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29400 event = addr;
29401
29402 if ((event->event_type == 0 && event->event_size == 0) ||
29403- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29404+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29405 return NULL;
29406
29407 return addr;
29408@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29409 return NULL;
29410
29411 if ((event->event_type == 0 && event->event_size == 0) ||
29412- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29413+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29414 return NULL;
29415
29416 (*pos)++;
29417@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
29418 int i;
29419
29420 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29421- seq_putc(m, data[i]);
29422+ if (!seq_putc(m, data[i]))
29423+ return -EFAULT;
29424
29425 return 0;
29426 }
29427@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
29428 log->bios_event_log_end = log->bios_event_log + len;
29429
29430 virt = acpi_os_map_memory(start, len);
29431+ if (!virt) {
29432+ kfree(log->bios_event_log);
29433+ log->bios_event_log = NULL;
29434+ return -EFAULT;
29435+ }
29436
29437- memcpy(log->bios_event_log, virt, len);
29438+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
29439
29440 acpi_os_unmap_memory(virt, len);
29441 return 0;
29442diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
29443index b58b561..c9088c8 100644
29444--- a/drivers/char/virtio_console.c
29445+++ b/drivers/char/virtio_console.c
29446@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
29447 if (to_user) {
29448 ssize_t ret;
29449
29450- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
29451+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
29452 if (ret)
29453 return -EFAULT;
29454 } else {
29455@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
29456 if (!port_has_data(port) && !port->host_connected)
29457 return 0;
29458
29459- return fill_readbuf(port, ubuf, count, true);
29460+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
29461 }
29462
29463 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
29464diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
29465index c9eee6d..f9d5280 100644
29466--- a/drivers/edac/amd64_edac.c
29467+++ b/drivers/edac/amd64_edac.c
29468@@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
29469 * PCI core identifies what devices are on a system during boot, and then
29470 * inquiry this table to see if this driver is for a given device found.
29471 */
29472-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
29473+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
29474 {
29475 .vendor = PCI_VENDOR_ID_AMD,
29476 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
29477diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
29478index e47e73b..348e0bd 100644
29479--- a/drivers/edac/amd76x_edac.c
29480+++ b/drivers/edac/amd76x_edac.c
29481@@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
29482 edac_mc_free(mci);
29483 }
29484
29485-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
29486+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
29487 {
29488 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29489 AMD762},
29490diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
29491index 1af531a..3a8ff27 100644
29492--- a/drivers/edac/e752x_edac.c
29493+++ b/drivers/edac/e752x_edac.c
29494@@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
29495 edac_mc_free(mci);
29496 }
29497
29498-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
29499+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
29500 {
29501 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29502 E7520},
29503diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
29504index 6ffb6d2..383d8d7 100644
29505--- a/drivers/edac/e7xxx_edac.c
29506+++ b/drivers/edac/e7xxx_edac.c
29507@@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
29508 edac_mc_free(mci);
29509 }
29510
29511-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
29512+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
29513 {
29514 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29515 E7205},
29516diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
29517index 97f5064..202b6e6 100644
29518--- a/drivers/edac/edac_pci_sysfs.c
29519+++ b/drivers/edac/edac_pci_sysfs.c
29520@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
29521 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29522 static int edac_pci_poll_msec = 1000; /* one second workq period */
29523
29524-static atomic_t pci_parity_count = ATOMIC_INIT(0);
29525-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29526+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29527+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29528
29529 static struct kobject *edac_pci_top_main_kobj;
29530 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29531@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29532 edac_printk(KERN_CRIT, EDAC_PCI,
29533 "Signaled System Error on %s\n",
29534 pci_name(dev));
29535- atomic_inc(&pci_nonparity_count);
29536+ atomic_inc_unchecked(&pci_nonparity_count);
29537 }
29538
29539 if (status & (PCI_STATUS_PARITY)) {
29540@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29541 "Master Data Parity Error on %s\n",
29542 pci_name(dev));
29543
29544- atomic_inc(&pci_parity_count);
29545+ atomic_inc_unchecked(&pci_parity_count);
29546 }
29547
29548 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29549@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29550 "Detected Parity Error on %s\n",
29551 pci_name(dev));
29552
29553- atomic_inc(&pci_parity_count);
29554+ atomic_inc_unchecked(&pci_parity_count);
29555 }
29556 }
29557
29558@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29559 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29560 "Signaled System Error on %s\n",
29561 pci_name(dev));
29562- atomic_inc(&pci_nonparity_count);
29563+ atomic_inc_unchecked(&pci_nonparity_count);
29564 }
29565
29566 if (status & (PCI_STATUS_PARITY)) {
29567@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29568 "Master Data Parity Error on "
29569 "%s\n", pci_name(dev));
29570
29571- atomic_inc(&pci_parity_count);
29572+ atomic_inc_unchecked(&pci_parity_count);
29573 }
29574
29575 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29576@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29577 "Detected Parity Error on %s\n",
29578 pci_name(dev));
29579
29580- atomic_inc(&pci_parity_count);
29581+ atomic_inc_unchecked(&pci_parity_count);
29582 }
29583 }
29584 }
29585@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29586 if (!check_pci_errors)
29587 return;
29588
29589- before_count = atomic_read(&pci_parity_count);
29590+ before_count = atomic_read_unchecked(&pci_parity_count);
29591
29592 /* scan all PCI devices looking for a Parity Error on devices and
29593 * bridges.
29594@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29595 /* Only if operator has selected panic on PCI Error */
29596 if (edac_pci_get_panic_on_pe()) {
29597 /* If the count is different 'after' from 'before' */
29598- if (before_count != atomic_read(&pci_parity_count))
29599+ if (before_count != atomic_read_unchecked(&pci_parity_count))
29600 panic("EDAC: PCI Parity Error");
29601 }
29602 }
29603diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
29604index c0510b3..6e2a954 100644
29605--- a/drivers/edac/i3000_edac.c
29606+++ b/drivers/edac/i3000_edac.c
29607@@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
29608 edac_mc_free(mci);
29609 }
29610
29611-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
29612+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
29613 {
29614 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29615 I3000},
29616diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
29617index 73f55e200..5faaf59 100644
29618--- a/drivers/edac/i3200_edac.c
29619+++ b/drivers/edac/i3200_edac.c
29620@@ -445,7 +445,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
29621 edac_mc_free(mci);
29622 }
29623
29624-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
29625+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
29626 {
29627 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29628 I3200},
29629diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
29630index 4dc3ac2..67d05a6 100644
29631--- a/drivers/edac/i5000_edac.c
29632+++ b/drivers/edac/i5000_edac.c
29633@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
29634 *
29635 * The "E500P" device is the first device supported.
29636 */
29637-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
29638+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
29639 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
29640 .driver_data = I5000P},
29641
29642diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
29643index bcbdeec..9886d16 100644
29644--- a/drivers/edac/i5100_edac.c
29645+++ b/drivers/edac/i5100_edac.c
29646@@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
29647 edac_mc_free(mci);
29648 }
29649
29650-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
29651+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
29652 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
29653 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
29654 { 0, }
29655diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
29656index 74d6ec34..baff517 100644
29657--- a/drivers/edac/i5400_edac.c
29658+++ b/drivers/edac/i5400_edac.c
29659@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
29660 *
29661 * The "E500P" device is the first device supported.
29662 */
29663-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
29664+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
29665 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
29666 {0,} /* 0 terminated list. */
29667 };
29668diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
29669index 6104dba..e7ea8e1 100644
29670--- a/drivers/edac/i7300_edac.c
29671+++ b/drivers/edac/i7300_edac.c
29672@@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
29673 *
29674 * Has only 8086:360c PCI ID
29675 */
29676-static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
29677+static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
29678 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
29679 {0,} /* 0 terminated list. */
29680 };
29681diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
29682index 8568d9b..42b2fa8 100644
29683--- a/drivers/edac/i7core_edac.c
29684+++ b/drivers/edac/i7core_edac.c
29685@@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
29686 /*
29687 * pci_device_id table for which devices we are looking for
29688 */
29689-static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
29690+static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
29691 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
29692 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
29693 {0,} /* 0 terminated list. */
29694diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
29695index 4329d39..f3022ef 100644
29696--- a/drivers/edac/i82443bxgx_edac.c
29697+++ b/drivers/edac/i82443bxgx_edac.c
29698@@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
29699
29700 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
29701
29702-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
29703+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
29704 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
29705 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
29706 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
29707diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
29708index 931a057..fd28340 100644
29709--- a/drivers/edac/i82860_edac.c
29710+++ b/drivers/edac/i82860_edac.c
29711@@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
29712 edac_mc_free(mci);
29713 }
29714
29715-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
29716+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
29717 {
29718 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29719 I82860},
29720diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
29721index 33864c6..01edc61 100644
29722--- a/drivers/edac/i82875p_edac.c
29723+++ b/drivers/edac/i82875p_edac.c
29724@@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
29725 edac_mc_free(mci);
29726 }
29727
29728-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
29729+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
29730 {
29731 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29732 I82875P},
29733diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
29734index 4184e01..dcb2cd3 100644
29735--- a/drivers/edac/i82975x_edac.c
29736+++ b/drivers/edac/i82975x_edac.c
29737@@ -612,7 +612,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
29738 edac_mc_free(mci);
29739 }
29740
29741-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
29742+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
29743 {
29744 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29745 I82975X
29746diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29747index 0106747..0b40417 100644
29748--- a/drivers/edac/mce_amd.h
29749+++ b/drivers/edac/mce_amd.h
29750@@ -83,7 +83,7 @@ struct amd_decoder_ops {
29751 bool (*dc_mce)(u16, u8);
29752 bool (*ic_mce)(u16, u8);
29753 bool (*nb_mce)(u16, u8);
29754-};
29755+} __no_const;
29756
29757 void amd_report_gart_errors(bool);
29758 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29759diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
29760index e294e1b..a41b05b 100644
29761--- a/drivers/edac/r82600_edac.c
29762+++ b/drivers/edac/r82600_edac.c
29763@@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
29764 edac_mc_free(mci);
29765 }
29766
29767-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
29768+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
29769 {
29770 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
29771 },
29772diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
29773index 1dc118d..8c68af9 100644
29774--- a/drivers/edac/sb_edac.c
29775+++ b/drivers/edac/sb_edac.c
29776@@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
29777 /*
29778 * pci_device_id table for which devices we are looking for
29779 */
29780-static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
29781+static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
29782 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
29783 {0,} /* 0 terminated list. */
29784 };
29785diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
29786index b6f47de..c5acf3a 100644
29787--- a/drivers/edac/x38_edac.c
29788+++ b/drivers/edac/x38_edac.c
29789@@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
29790 edac_mc_free(mci);
29791 }
29792
29793-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
29794+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
29795 {
29796 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29797 X38},
29798diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29799index 85661b0..c784559a 100644
29800--- a/drivers/firewire/core-card.c
29801+++ b/drivers/firewire/core-card.c
29802@@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
29803
29804 void fw_core_remove_card(struct fw_card *card)
29805 {
29806- struct fw_card_driver dummy_driver = dummy_driver_template;
29807+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
29808
29809 card->driver->update_phy_reg(card, 4,
29810 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29811diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29812index 4799393..37bd3ab 100644
29813--- a/drivers/firewire/core-cdev.c
29814+++ b/drivers/firewire/core-cdev.c
29815@@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
29816 int ret;
29817
29818 if ((request->channels == 0 && request->bandwidth == 0) ||
29819- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29820- request->bandwidth < 0)
29821+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29822 return -EINVAL;
29823
29824 r = kmalloc(sizeof(*r), GFP_KERNEL);
29825diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29826index 855ab3f..11f4bbd 100644
29827--- a/drivers/firewire/core-transaction.c
29828+++ b/drivers/firewire/core-transaction.c
29829@@ -37,6 +37,7 @@
29830 #include <linux/timer.h>
29831 #include <linux/types.h>
29832 #include <linux/workqueue.h>
29833+#include <linux/sched.h>
29834
29835 #include <asm/byteorder.h>
29836
29837diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
29838index b45be57..5fad18b 100644
29839--- a/drivers/firewire/core.h
29840+++ b/drivers/firewire/core.h
29841@@ -101,6 +101,7 @@ struct fw_card_driver {
29842
29843 int (*stop_iso)(struct fw_iso_context *ctx);
29844 };
29845+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
29846
29847 void fw_card_initialize(struct fw_card *card,
29848 const struct fw_card_driver *driver, struct device *device);
29849diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
29850index 153980b..4b4d046 100644
29851--- a/drivers/firmware/dmi_scan.c
29852+++ b/drivers/firmware/dmi_scan.c
29853@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
29854 }
29855 }
29856 else {
29857- /*
29858- * no iounmap() for that ioremap(); it would be a no-op, but
29859- * it's so early in setup that sucker gets confused into doing
29860- * what it shouldn't if we actually call it.
29861- */
29862 p = dmi_ioremap(0xF0000, 0x10000);
29863 if (p == NULL)
29864 goto error;
29865@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
29866 if (buf == NULL)
29867 return -1;
29868
29869- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
29870+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
29871
29872 iounmap(buf);
29873 return 0;
29874diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
29875index 82d5c20..44a7177 100644
29876--- a/drivers/gpio/gpio-vr41xx.c
29877+++ b/drivers/gpio/gpio-vr41xx.c
29878@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29879 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29880 maskl, pendl, maskh, pendh);
29881
29882- atomic_inc(&irq_err_count);
29883+ atomic_inc_unchecked(&irq_err_count);
29884
29885 return -EINVAL;
29886 }
29887diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
29888index 84a4a80..ce0306e 100644
29889--- a/drivers/gpu/drm/drm_crtc_helper.c
29890+++ b/drivers/gpu/drm/drm_crtc_helper.c
29891@@ -280,7 +280,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
29892 struct drm_crtc *tmp;
29893 int crtc_mask = 1;
29894
29895- WARN(!crtc, "checking null crtc?\n");
29896+ BUG_ON(!crtc);
29897
29898 dev = crtc->dev;
29899
29900diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
29901index ebf7d3f..d64c436 100644
29902--- a/drivers/gpu/drm/drm_drv.c
29903+++ b/drivers/gpu/drm/drm_drv.c
29904@@ -312,7 +312,7 @@ module_exit(drm_core_exit);
29905 /**
29906 * Copy and IOCTL return string to user space
29907 */
29908-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
29909+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
29910 {
29911 int len;
29912
29913@@ -391,7 +391,7 @@ long drm_ioctl(struct file *filp,
29914
29915 dev = file_priv->minor->dev;
29916 atomic_inc(&dev->ioctl_count);
29917- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29918+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29919 ++file_priv->ioctl_count;
29920
29921 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29922diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
29923index 6263b01..7987f55 100644
29924--- a/drivers/gpu/drm/drm_fops.c
29925+++ b/drivers/gpu/drm/drm_fops.c
29926@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
29927 }
29928
29929 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29930- atomic_set(&dev->counts[i], 0);
29931+ atomic_set_unchecked(&dev->counts[i], 0);
29932
29933 dev->sigdata.lock = NULL;
29934
29935@@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
29936
29937 retcode = drm_open_helper(inode, filp, dev);
29938 if (!retcode) {
29939- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29940- if (!dev->open_count++)
29941+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29942+ if (local_inc_return(&dev->open_count) == 1)
29943 retcode = drm_setup(dev);
29944 }
29945 if (!retcode) {
29946@@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
29947
29948 mutex_lock(&drm_global_mutex);
29949
29950- DRM_DEBUG("open_count = %d\n", dev->open_count);
29951+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
29952
29953 if (dev->driver->preclose)
29954 dev->driver->preclose(dev, file_priv);
29955@@ -482,10 +482,10 @@ int drm_release(struct inode *inode, struct file *filp)
29956 * Begin inline drm_release
29957 */
29958
29959- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29960+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
29961 task_pid_nr(current),
29962 (long)old_encode_dev(file_priv->minor->device),
29963- dev->open_count);
29964+ local_read(&dev->open_count));
29965
29966 /* Release any auth tokens that might point to this file_priv,
29967 (do that under the drm_global_mutex) */
29968@@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
29969 * End inline drm_release
29970 */
29971
29972- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29973- if (!--dev->open_count) {
29974+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29975+ if (local_dec_and_test(&dev->open_count)) {
29976 if (atomic_read(&dev->ioctl_count)) {
29977 DRM_ERROR("Device busy: %d\n",
29978 atomic_read(&dev->ioctl_count));
29979diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
29980index c87dc96..326055d 100644
29981--- a/drivers/gpu/drm/drm_global.c
29982+++ b/drivers/gpu/drm/drm_global.c
29983@@ -36,7 +36,7 @@
29984 struct drm_global_item {
29985 struct mutex mutex;
29986 void *object;
29987- int refcount;
29988+ atomic_t refcount;
29989 };
29990
29991 static struct drm_global_item glob[DRM_GLOBAL_NUM];
29992@@ -49,7 +49,7 @@ void drm_global_init(void)
29993 struct drm_global_item *item = &glob[i];
29994 mutex_init(&item->mutex);
29995 item->object = NULL;
29996- item->refcount = 0;
29997+ atomic_set(&item->refcount, 0);
29998 }
29999 }
30000
30001@@ -59,7 +59,7 @@ void drm_global_release(void)
30002 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30003 struct drm_global_item *item = &glob[i];
30004 BUG_ON(item->object != NULL);
30005- BUG_ON(item->refcount != 0);
30006+ BUG_ON(atomic_read(&item->refcount) != 0);
30007 }
30008 }
30009
30010@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30011 void *object;
30012
30013 mutex_lock(&item->mutex);
30014- if (item->refcount == 0) {
30015+ if (atomic_read(&item->refcount) == 0) {
30016 item->object = kzalloc(ref->size, GFP_KERNEL);
30017 if (unlikely(item->object == NULL)) {
30018 ret = -ENOMEM;
30019@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30020 goto out_err;
30021
30022 }
30023- ++item->refcount;
30024+ atomic_inc(&item->refcount);
30025 ref->object = item->object;
30026 object = item->object;
30027 mutex_unlock(&item->mutex);
30028@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30029 struct drm_global_item *item = &glob[ref->global_type];
30030
30031 mutex_lock(&item->mutex);
30032- BUG_ON(item->refcount == 0);
30033+ BUG_ON(atomic_read(&item->refcount) == 0);
30034 BUG_ON(ref->object != item->object);
30035- if (--item->refcount == 0) {
30036+ if (atomic_dec_and_test(&item->refcount)) {
30037 ref->release(ref);
30038 item->object = NULL;
30039 }
30040diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30041index ab1162d..42587b2 100644
30042--- a/drivers/gpu/drm/drm_info.c
30043+++ b/drivers/gpu/drm/drm_info.c
30044@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30045 struct drm_local_map *map;
30046 struct drm_map_list *r_list;
30047
30048- /* Hardcoded from _DRM_FRAME_BUFFER,
30049- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30050- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30051- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30052+ static const char * const types[] = {
30053+ [_DRM_FRAME_BUFFER] = "FB",
30054+ [_DRM_REGISTERS] = "REG",
30055+ [_DRM_SHM] = "SHM",
30056+ [_DRM_AGP] = "AGP",
30057+ [_DRM_SCATTER_GATHER] = "SG",
30058+ [_DRM_CONSISTENT] = "PCI",
30059+ [_DRM_GEM] = "GEM" };
30060 const char *type;
30061 int i;
30062
30063@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30064 map = r_list->map;
30065 if (!map)
30066 continue;
30067- if (map->type < 0 || map->type > 5)
30068+ if (map->type >= ARRAY_SIZE(types))
30069 type = "??";
30070 else
30071 type = types[map->type];
30072@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30073 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30074 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30075 vma->vm_flags & VM_IO ? 'i' : '-',
30076+#ifdef CONFIG_GRKERNSEC_HIDESYM
30077+ 0);
30078+#else
30079 vma->vm_pgoff);
30080+#endif
30081
30082 #if defined(__i386__)
30083 pgprot = pgprot_val(vma->vm_page_prot);
30084diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30085index 637fcc3..e890b33 100644
30086--- a/drivers/gpu/drm/drm_ioc32.c
30087+++ b/drivers/gpu/drm/drm_ioc32.c
30088@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30089 request = compat_alloc_user_space(nbytes);
30090 if (!access_ok(VERIFY_WRITE, request, nbytes))
30091 return -EFAULT;
30092- list = (struct drm_buf_desc *) (request + 1);
30093+ list = (struct drm_buf_desc __user *) (request + 1);
30094
30095 if (__put_user(count, &request->count)
30096 || __put_user(list, &request->list))
30097@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30098 request = compat_alloc_user_space(nbytes);
30099 if (!access_ok(VERIFY_WRITE, request, nbytes))
30100 return -EFAULT;
30101- list = (struct drm_buf_pub *) (request + 1);
30102+ list = (struct drm_buf_pub __user *) (request + 1);
30103
30104 if (__put_user(count, &request->count)
30105 || __put_user(list, &request->list))
30106diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30107index 956fd38..e52167a 100644
30108--- a/drivers/gpu/drm/drm_ioctl.c
30109+++ b/drivers/gpu/drm/drm_ioctl.c
30110@@ -251,7 +251,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30111 stats->data[i].value =
30112 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30113 else
30114- stats->data[i].value = atomic_read(&dev->counts[i]);
30115+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30116 stats->data[i].type = dev->types[i];
30117 }
30118
30119diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30120index c79c713..2048588 100644
30121--- a/drivers/gpu/drm/drm_lock.c
30122+++ b/drivers/gpu/drm/drm_lock.c
30123@@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30124 if (drm_lock_take(&master->lock, lock->context)) {
30125 master->lock.file_priv = file_priv;
30126 master->lock.lock_time = jiffies;
30127- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30128+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30129 break; /* Got lock */
30130 }
30131
30132@@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30133 return -EINVAL;
30134 }
30135
30136- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30137+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30138
30139 if (drm_lock_free(&master->lock, lock->context)) {
30140 /* FIXME: Should really bail out here. */
30141diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30142index 7f4b4e1..bf4def2 100644
30143--- a/drivers/gpu/drm/i810/i810_dma.c
30144+++ b/drivers/gpu/drm/i810/i810_dma.c
30145@@ -948,8 +948,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30146 dma->buflist[vertex->idx],
30147 vertex->discard, vertex->used);
30148
30149- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30150- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30151+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30152+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30153 sarea_priv->last_enqueue = dev_priv->counter - 1;
30154 sarea_priv->last_dispatch = (int)hw_status[5];
30155
30156@@ -1109,8 +1109,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30157 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30158 mc->last_render);
30159
30160- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30161- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30162+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30163+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30164 sarea_priv->last_enqueue = dev_priv->counter - 1;
30165 sarea_priv->last_dispatch = (int)hw_status[5];
30166
30167diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30168index c9339f4..f5e1b9d 100644
30169--- a/drivers/gpu/drm/i810/i810_drv.h
30170+++ b/drivers/gpu/drm/i810/i810_drv.h
30171@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30172 int page_flipping;
30173
30174 wait_queue_head_t irq_queue;
30175- atomic_t irq_received;
30176- atomic_t irq_emitted;
30177+ atomic_unchecked_t irq_received;
30178+ atomic_unchecked_t irq_emitted;
30179
30180 int front_offset;
30181 } drm_i810_private_t;
30182diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30183index deaa657..e0fd296 100644
30184--- a/drivers/gpu/drm/i915/i915_debugfs.c
30185+++ b/drivers/gpu/drm/i915/i915_debugfs.c
30186@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30187 I915_READ(GTIMR));
30188 }
30189 seq_printf(m, "Interrupts received: %d\n",
30190- atomic_read(&dev_priv->irq_received));
30191+ atomic_read_unchecked(&dev_priv->irq_received));
30192 for (i = 0; i < I915_NUM_RINGS; i++) {
30193 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30194 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30195@@ -1321,7 +1321,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
30196 return ret;
30197
30198 if (opregion->header)
30199- seq_write(m, opregion->header, OPREGION_SIZE);
30200+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30201
30202 mutex_unlock(&dev->struct_mutex);
30203
30204diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30205index ddfe3d9..f6e6b21 100644
30206--- a/drivers/gpu/drm/i915/i915_dma.c
30207+++ b/drivers/gpu/drm/i915/i915_dma.c
30208@@ -1175,7 +1175,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30209 bool can_switch;
30210
30211 spin_lock(&dev->count_lock);
30212- can_switch = (dev->open_count == 0);
30213+ can_switch = (local_read(&dev->open_count) == 0);
30214 spin_unlock(&dev->count_lock);
30215 return can_switch;
30216 }
30217diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30218index 9689ca3..294f9c1 100644
30219--- a/drivers/gpu/drm/i915/i915_drv.h
30220+++ b/drivers/gpu/drm/i915/i915_drv.h
30221@@ -231,7 +231,7 @@ struct drm_i915_display_funcs {
30222 /* render clock increase/decrease */
30223 /* display clock increase/decrease */
30224 /* pll clock increase/decrease */
30225-};
30226+} __no_const;
30227
30228 struct intel_device_info {
30229 u8 gen;
30230@@ -320,7 +320,7 @@ typedef struct drm_i915_private {
30231 int current_page;
30232 int page_flipping;
30233
30234- atomic_t irq_received;
30235+ atomic_unchecked_t irq_received;
30236
30237 /* protects the irq masks */
30238 spinlock_t irq_lock;
30239@@ -896,7 +896,7 @@ struct drm_i915_gem_object {
30240 * will be page flipped away on the next vblank. When it
30241 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30242 */
30243- atomic_t pending_flip;
30244+ atomic_unchecked_t pending_flip;
30245 };
30246
30247 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30248@@ -1276,7 +1276,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
30249 extern void intel_teardown_gmbus(struct drm_device *dev);
30250 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30251 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30252-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30253+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30254 {
30255 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30256 }
30257diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30258index 65e1f00..a30ef00 100644
30259--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30260+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30261@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30262 i915_gem_clflush_object(obj);
30263
30264 if (obj->base.pending_write_domain)
30265- cd->flips |= atomic_read(&obj->pending_flip);
30266+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30267
30268 /* The actual obj->write_domain will be updated with
30269 * pending_write_domain after we emit the accumulated flush for all
30270@@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30271
30272 static int
30273 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30274- int count)
30275+ unsigned int count)
30276 {
30277- int i;
30278+ unsigned int i;
30279
30280 for (i = 0; i < count; i++) {
30281 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30282diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30283index 5bd4361..0241a42 100644
30284--- a/drivers/gpu/drm/i915/i915_irq.c
30285+++ b/drivers/gpu/drm/i915/i915_irq.c
30286@@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30287 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30288 struct drm_i915_master_private *master_priv;
30289
30290- atomic_inc(&dev_priv->irq_received);
30291+ atomic_inc_unchecked(&dev_priv->irq_received);
30292
30293 /* disable master interrupt before clearing iir */
30294 de_ier = I915_READ(DEIER);
30295@@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30296 struct drm_i915_master_private *master_priv;
30297 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
30298
30299- atomic_inc(&dev_priv->irq_received);
30300+ atomic_inc_unchecked(&dev_priv->irq_received);
30301
30302 if (IS_GEN6(dev))
30303 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
30304@@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
30305 int ret = IRQ_NONE, pipe;
30306 bool blc_event = false;
30307
30308- atomic_inc(&dev_priv->irq_received);
30309+ atomic_inc_unchecked(&dev_priv->irq_received);
30310
30311 iir = I915_READ(IIR);
30312
30313@@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30314 {
30315 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30316
30317- atomic_set(&dev_priv->irq_received, 0);
30318+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30319
30320 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30321 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30322@@ -1932,7 +1932,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
30323 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30324 int pipe;
30325
30326- atomic_set(&dev_priv->irq_received, 0);
30327+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30328
30329 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30330 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30331diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30332index 397087c..9178d0d 100644
30333--- a/drivers/gpu/drm/i915/intel_display.c
30334+++ b/drivers/gpu/drm/i915/intel_display.c
30335@@ -2238,7 +2238,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
30336
30337 wait_event(dev_priv->pending_flip_queue,
30338 atomic_read(&dev_priv->mm.wedged) ||
30339- atomic_read(&obj->pending_flip) == 0);
30340+ atomic_read_unchecked(&obj->pending_flip) == 0);
30341
30342 /* Big Hammer, we also need to ensure that any pending
30343 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30344@@ -2859,7 +2859,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
30345 obj = to_intel_framebuffer(crtc->fb)->obj;
30346 dev_priv = crtc->dev->dev_private;
30347 wait_event(dev_priv->pending_flip_queue,
30348- atomic_read(&obj->pending_flip) == 0);
30349+ atomic_read_unchecked(&obj->pending_flip) == 0);
30350 }
30351
30352 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
30353@@ -7171,7 +7171,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30354
30355 atomic_clear_mask(1 << intel_crtc->plane,
30356 &obj->pending_flip.counter);
30357- if (atomic_read(&obj->pending_flip) == 0)
30358+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
30359 wake_up(&dev_priv->pending_flip_queue);
30360
30361 schedule_work(&work->work);
30362@@ -7461,7 +7461,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30363 /* Block clients from rendering to the new back buffer until
30364 * the flip occurs and the object is no longer visible.
30365 */
30366- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30367+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30368
30369 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30370 if (ret)
30371@@ -7475,7 +7475,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30372 return 0;
30373
30374 cleanup_pending:
30375- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30376+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30377 drm_gem_object_unreference(&work->old_fb_obj->base);
30378 drm_gem_object_unreference(&obj->base);
30379 mutex_unlock(&dev->struct_mutex);
30380diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30381index 54558a0..2d97005 100644
30382--- a/drivers/gpu/drm/mga/mga_drv.h
30383+++ b/drivers/gpu/drm/mga/mga_drv.h
30384@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30385 u32 clear_cmd;
30386 u32 maccess;
30387
30388- atomic_t vbl_received; /**< Number of vblanks received. */
30389+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30390 wait_queue_head_t fence_queue;
30391- atomic_t last_fence_retired;
30392+ atomic_unchecked_t last_fence_retired;
30393 u32 next_fence_to_post;
30394
30395 unsigned int fb_cpp;
30396diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30397index 2581202..f230a8d9 100644
30398--- a/drivers/gpu/drm/mga/mga_irq.c
30399+++ b/drivers/gpu/drm/mga/mga_irq.c
30400@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30401 if (crtc != 0)
30402 return 0;
30403
30404- return atomic_read(&dev_priv->vbl_received);
30405+ return atomic_read_unchecked(&dev_priv->vbl_received);
30406 }
30407
30408
30409@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30410 /* VBLANK interrupt */
30411 if (status & MGA_VLINEPEN) {
30412 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30413- atomic_inc(&dev_priv->vbl_received);
30414+ atomic_inc_unchecked(&dev_priv->vbl_received);
30415 drm_handle_vblank(dev, 0);
30416 handled = 1;
30417 }
30418@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30419 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30420 MGA_WRITE(MGA_PRIMEND, prim_end);
30421
30422- atomic_inc(&dev_priv->last_fence_retired);
30423+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
30424 DRM_WAKEUP(&dev_priv->fence_queue);
30425 handled = 1;
30426 }
30427@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30428 * using fences.
30429 */
30430 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30431- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30432+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30433 - *sequence) <= (1 << 23)));
30434
30435 *sequence = cur_fence;
30436diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30437index e5cbead..6c354a3 100644
30438--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30439+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30440@@ -199,7 +199,7 @@ struct methods {
30441 const char desc[8];
30442 void (*loadbios)(struct drm_device *, uint8_t *);
30443 const bool rw;
30444-};
30445+} __do_const;
30446
30447 static struct methods shadow_methods[] = {
30448 { "PRAMIN", load_vbios_pramin, true },
30449@@ -5290,7 +5290,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30450 struct bit_table {
30451 const char id;
30452 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30453-};
30454+} __no_const;
30455
30456 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30457
30458diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30459index b827098..c31a797 100644
30460--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30461+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30462@@ -242,7 +242,7 @@ struct nouveau_channel {
30463 struct list_head pending;
30464 uint32_t sequence;
30465 uint32_t sequence_ack;
30466- atomic_t last_sequence_irq;
30467+ atomic_unchecked_t last_sequence_irq;
30468 struct nouveau_vma vma;
30469 } fence;
30470
30471@@ -323,7 +323,7 @@ struct nouveau_exec_engine {
30472 u32 handle, u16 class);
30473 void (*set_tile_region)(struct drm_device *dev, int i);
30474 void (*tlb_flush)(struct drm_device *, int engine);
30475-};
30476+} __no_const;
30477
30478 struct nouveau_instmem_engine {
30479 void *priv;
30480@@ -345,13 +345,13 @@ struct nouveau_instmem_engine {
30481 struct nouveau_mc_engine {
30482 int (*init)(struct drm_device *dev);
30483 void (*takedown)(struct drm_device *dev);
30484-};
30485+} __no_const;
30486
30487 struct nouveau_timer_engine {
30488 int (*init)(struct drm_device *dev);
30489 void (*takedown)(struct drm_device *dev);
30490 uint64_t (*read)(struct drm_device *dev);
30491-};
30492+} __no_const;
30493
30494 struct nouveau_fb_engine {
30495 int num_tiles;
30496@@ -566,7 +566,7 @@ struct nouveau_vram_engine {
30497 void (*put)(struct drm_device *, struct nouveau_mem **);
30498
30499 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30500-};
30501+} __no_const;
30502
30503 struct nouveau_engine {
30504 struct nouveau_instmem_engine instmem;
30505@@ -714,7 +714,7 @@ struct drm_nouveau_private {
30506 struct drm_global_reference mem_global_ref;
30507 struct ttm_bo_global_ref bo_global_ref;
30508 struct ttm_bo_device bdev;
30509- atomic_t validate_sequence;
30510+ atomic_unchecked_t validate_sequence;
30511 } ttm;
30512
30513 struct {
30514diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
30515index 2f6daae..c9d7b9e 100644
30516--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30517+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30518@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
30519 if (USE_REFCNT(dev))
30520 sequence = nvchan_rd32(chan, 0x48);
30521 else
30522- sequence = atomic_read(&chan->fence.last_sequence_irq);
30523+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30524
30525 if (chan->fence.sequence_ack == sequence)
30526 goto out;
30527@@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
30528 return ret;
30529 }
30530
30531- atomic_set(&chan->fence.last_sequence_irq, 0);
30532+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
30533 return 0;
30534 }
30535
30536diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30537index 7ce3fde..cb3ea04 100644
30538--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30539+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30540@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30541 int trycnt = 0;
30542 int ret, i;
30543
30544- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30545+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30546 retry:
30547 if (++trycnt > 100000) {
30548 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30549diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30550index f80c5e0..936baa7 100644
30551--- a/drivers/gpu/drm/nouveau/nouveau_state.c
30552+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30553@@ -543,7 +543,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30554 bool can_switch;
30555
30556 spin_lock(&dev->count_lock);
30557- can_switch = (dev->open_count == 0);
30558+ can_switch = (local_read(&dev->open_count) == 0);
30559 spin_unlock(&dev->count_lock);
30560 return can_switch;
30561 }
30562diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30563index dbdea8e..cd6eeeb 100644
30564--- a/drivers/gpu/drm/nouveau/nv04_graph.c
30565+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30566@@ -554,7 +554,7 @@ static int
30567 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30568 u32 class, u32 mthd, u32 data)
30569 {
30570- atomic_set(&chan->fence.last_sequence_irq, data);
30571+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30572 return 0;
30573 }
30574
30575diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30576index bcac90b..53bfc76 100644
30577--- a/drivers/gpu/drm/r128/r128_cce.c
30578+++ b/drivers/gpu/drm/r128/r128_cce.c
30579@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30580
30581 /* GH: Simple idle check.
30582 */
30583- atomic_set(&dev_priv->idle_count, 0);
30584+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30585
30586 /* We don't support anything other than bus-mastering ring mode,
30587 * but the ring can be in either AGP or PCI space for the ring
30588diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30589index 930c71b..499aded 100644
30590--- a/drivers/gpu/drm/r128/r128_drv.h
30591+++ b/drivers/gpu/drm/r128/r128_drv.h
30592@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30593 int is_pci;
30594 unsigned long cce_buffers_offset;
30595
30596- atomic_t idle_count;
30597+ atomic_unchecked_t idle_count;
30598
30599 int page_flipping;
30600 int current_page;
30601 u32 crtc_offset;
30602 u32 crtc_offset_cntl;
30603
30604- atomic_t vbl_received;
30605+ atomic_unchecked_t vbl_received;
30606
30607 u32 color_fmt;
30608 unsigned int front_offset;
30609diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30610index 429d5a0..7e899ed 100644
30611--- a/drivers/gpu/drm/r128/r128_irq.c
30612+++ b/drivers/gpu/drm/r128/r128_irq.c
30613@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30614 if (crtc != 0)
30615 return 0;
30616
30617- return atomic_read(&dev_priv->vbl_received);
30618+ return atomic_read_unchecked(&dev_priv->vbl_received);
30619 }
30620
30621 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30622@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30623 /* VBLANK interrupt */
30624 if (status & R128_CRTC_VBLANK_INT) {
30625 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30626- atomic_inc(&dev_priv->vbl_received);
30627+ atomic_inc_unchecked(&dev_priv->vbl_received);
30628 drm_handle_vblank(dev, 0);
30629 return IRQ_HANDLED;
30630 }
30631diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30632index a9e33ce..09edd4b 100644
30633--- a/drivers/gpu/drm/r128/r128_state.c
30634+++ b/drivers/gpu/drm/r128/r128_state.c
30635@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30636
30637 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30638 {
30639- if (atomic_read(&dev_priv->idle_count) == 0)
30640+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30641 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30642 else
30643- atomic_set(&dev_priv->idle_count, 0);
30644+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30645 }
30646
30647 #endif
30648diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30649index 5a82b6b..9e69c73 100644
30650--- a/drivers/gpu/drm/radeon/mkregtable.c
30651+++ b/drivers/gpu/drm/radeon/mkregtable.c
30652@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30653 regex_t mask_rex;
30654 regmatch_t match[4];
30655 char buf[1024];
30656- size_t end;
30657+ long end;
30658 int len;
30659 int done = 0;
30660 int r;
30661 unsigned o;
30662 struct offset *offset;
30663 char last_reg_s[10];
30664- int last_reg;
30665+ unsigned long last_reg;
30666
30667 if (regcomp
30668 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30669diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30670index 1668ec1..30ebdab 100644
30671--- a/drivers/gpu/drm/radeon/radeon.h
30672+++ b/drivers/gpu/drm/radeon/radeon.h
30673@@ -250,7 +250,7 @@ struct radeon_fence_driver {
30674 uint32_t scratch_reg;
30675 uint64_t gpu_addr;
30676 volatile uint32_t *cpu_addr;
30677- atomic_t seq;
30678+ atomic_unchecked_t seq;
30679 uint32_t last_seq;
30680 unsigned long last_jiffies;
30681 unsigned long last_timeout;
30682@@ -752,7 +752,7 @@ struct r600_blit_cp_primitives {
30683 int x2, int y2);
30684 void (*draw_auto)(struct radeon_device *rdev);
30685 void (*set_default_state)(struct radeon_device *rdev);
30686-};
30687+} __no_const;
30688
30689 struct r600_blit {
30690 struct mutex mutex;
30691@@ -1201,7 +1201,7 @@ struct radeon_asic {
30692 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
30693 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30694 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30695-};
30696+} __no_const;
30697
30698 /*
30699 * Asic structures
30700diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30701index 49f7cb7..2fcb48f 100644
30702--- a/drivers/gpu/drm/radeon/radeon_device.c
30703+++ b/drivers/gpu/drm/radeon/radeon_device.c
30704@@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30705 bool can_switch;
30706
30707 spin_lock(&dev->count_lock);
30708- can_switch = (dev->open_count == 0);
30709+ can_switch = (local_read(&dev->open_count) == 0);
30710 spin_unlock(&dev->count_lock);
30711 return can_switch;
30712 }
30713diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30714index a1b59ca..86f2d44 100644
30715--- a/drivers/gpu/drm/radeon/radeon_drv.h
30716+++ b/drivers/gpu/drm/radeon/radeon_drv.h
30717@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30718
30719 /* SW interrupt */
30720 wait_queue_head_t swi_queue;
30721- atomic_t swi_emitted;
30722+ atomic_unchecked_t swi_emitted;
30723 int vblank_crtc;
30724 uint32_t irq_enable_reg;
30725 uint32_t r500_disp_irq_reg;
30726diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30727index 4bd36a3..e66fe9c 100644
30728--- a/drivers/gpu/drm/radeon/radeon_fence.c
30729+++ b/drivers/gpu/drm/radeon/radeon_fence.c
30730@@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30731 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
30732 return 0;
30733 }
30734- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
30735+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
30736 if (!rdev->ring[fence->ring].ready)
30737 /* FIXME: cp is not running assume everythings is done right
30738 * away
30739@@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
30740 }
30741 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
30742 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
30743- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
30744+ radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
30745 rdev->fence_drv[ring].initialized = true;
30746 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
30747 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
30748@@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
30749 rdev->fence_drv[ring].scratch_reg = -1;
30750 rdev->fence_drv[ring].cpu_addr = NULL;
30751 rdev->fence_drv[ring].gpu_addr = 0;
30752- atomic_set(&rdev->fence_drv[ring].seq, 0);
30753+ atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
30754 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
30755 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
30756 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
30757diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30758index 48b7cea..342236f 100644
30759--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30760+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30761@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30762 request = compat_alloc_user_space(sizeof(*request));
30763 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30764 || __put_user(req32.param, &request->param)
30765- || __put_user((void __user *)(unsigned long)req32.value,
30766+ || __put_user((unsigned long)req32.value,
30767 &request->value))
30768 return -EFAULT;
30769
30770diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30771index 00da384..32f972d 100644
30772--- a/drivers/gpu/drm/radeon/radeon_irq.c
30773+++ b/drivers/gpu/drm/radeon/radeon_irq.c
30774@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30775 unsigned int ret;
30776 RING_LOCALS;
30777
30778- atomic_inc(&dev_priv->swi_emitted);
30779- ret = atomic_read(&dev_priv->swi_emitted);
30780+ atomic_inc_unchecked(&dev_priv->swi_emitted);
30781+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30782
30783 BEGIN_RING(4);
30784 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30785@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30786 drm_radeon_private_t *dev_priv =
30787 (drm_radeon_private_t *) dev->dev_private;
30788
30789- atomic_set(&dev_priv->swi_emitted, 0);
30790+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30791 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30792
30793 dev->max_vblank_count = 0x001fffff;
30794diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
30795index e8422ae..d22d4a8 100644
30796--- a/drivers/gpu/drm/radeon/radeon_state.c
30797+++ b/drivers/gpu/drm/radeon/radeon_state.c
30798@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
30799 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
30800 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
30801
30802- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30803+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
30804 sarea_priv->nbox * sizeof(depth_boxes[0])))
30805 return -EFAULT;
30806
30807@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
30808 {
30809 drm_radeon_private_t *dev_priv = dev->dev_private;
30810 drm_radeon_getparam_t *param = data;
30811- int value;
30812+ int value = 0;
30813
30814 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30815
30816diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
30817index c421e77..e6bf2e8 100644
30818--- a/drivers/gpu/drm/radeon/radeon_ttm.c
30819+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
30820@@ -842,8 +842,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30821 }
30822 if (unlikely(ttm_vm_ops == NULL)) {
30823 ttm_vm_ops = vma->vm_ops;
30824- radeon_ttm_vm_ops = *ttm_vm_ops;
30825- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30826+ pax_open_kernel();
30827+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
30828+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30829+ pax_close_kernel();
30830 }
30831 vma->vm_ops = &radeon_ttm_vm_ops;
30832 return 0;
30833diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
30834index f68dff2..8df955c 100644
30835--- a/drivers/gpu/drm/radeon/rs690.c
30836+++ b/drivers/gpu/drm/radeon/rs690.c
30837@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
30838 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30839 rdev->pm.sideport_bandwidth.full)
30840 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30841- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
30842+ read_delay_latency.full = dfixed_const(800 * 1000);
30843 read_delay_latency.full = dfixed_div(read_delay_latency,
30844 rdev->pm.igp_sideport_mclk);
30845+ a.full = dfixed_const(370);
30846+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
30847 } else {
30848 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30849 rdev->pm.k8_bandwidth.full)
30850diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30851index 499debd..66fce72 100644
30852--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
30853+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
30854@@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
30855 static int ttm_pool_mm_shrink(struct shrinker *shrink,
30856 struct shrink_control *sc)
30857 {
30858- static atomic_t start_pool = ATOMIC_INIT(0);
30859+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
30860 unsigned i;
30861- unsigned pool_offset = atomic_add_return(1, &start_pool);
30862+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
30863 struct ttm_page_pool *pool;
30864 int shrink_pages = sc->nr_to_scan;
30865
30866diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
30867index 88edacc..1e5412b 100644
30868--- a/drivers/gpu/drm/via/via_drv.h
30869+++ b/drivers/gpu/drm/via/via_drv.h
30870@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30871 typedef uint32_t maskarray_t[5];
30872
30873 typedef struct drm_via_irq {
30874- atomic_t irq_received;
30875+ atomic_unchecked_t irq_received;
30876 uint32_t pending_mask;
30877 uint32_t enable_mask;
30878 wait_queue_head_t irq_queue;
30879@@ -75,7 +75,7 @@ typedef struct drm_via_private {
30880 struct timeval last_vblank;
30881 int last_vblank_valid;
30882 unsigned usec_per_vblank;
30883- atomic_t vbl_received;
30884+ atomic_unchecked_t vbl_received;
30885 drm_via_state_t hc_state;
30886 char pci_buf[VIA_PCI_BUF_SIZE];
30887 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30888diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
30889index d391f48..10c8ca3 100644
30890--- a/drivers/gpu/drm/via/via_irq.c
30891+++ b/drivers/gpu/drm/via/via_irq.c
30892@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
30893 if (crtc != 0)
30894 return 0;
30895
30896- return atomic_read(&dev_priv->vbl_received);
30897+ return atomic_read_unchecked(&dev_priv->vbl_received);
30898 }
30899
30900 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30901@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30902
30903 status = VIA_READ(VIA_REG_INTERRUPT);
30904 if (status & VIA_IRQ_VBLANK_PENDING) {
30905- atomic_inc(&dev_priv->vbl_received);
30906- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30907+ atomic_inc_unchecked(&dev_priv->vbl_received);
30908+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30909 do_gettimeofday(&cur_vblank);
30910 if (dev_priv->last_vblank_valid) {
30911 dev_priv->usec_per_vblank =
30912@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30913 dev_priv->last_vblank = cur_vblank;
30914 dev_priv->last_vblank_valid = 1;
30915 }
30916- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30917+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30918 DRM_DEBUG("US per vblank is: %u\n",
30919 dev_priv->usec_per_vblank);
30920 }
30921@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30922
30923 for (i = 0; i < dev_priv->num_irqs; ++i) {
30924 if (status & cur_irq->pending_mask) {
30925- atomic_inc(&cur_irq->irq_received);
30926+ atomic_inc_unchecked(&cur_irq->irq_received);
30927 DRM_WAKEUP(&cur_irq->irq_queue);
30928 handled = 1;
30929 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
30930@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
30931 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30932 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30933 masks[irq][4]));
30934- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30935+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30936 } else {
30937 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30938 (((cur_irq_sequence =
30939- atomic_read(&cur_irq->irq_received)) -
30940+ atomic_read_unchecked(&cur_irq->irq_received)) -
30941 *sequence) <= (1 << 23)));
30942 }
30943 *sequence = cur_irq_sequence;
30944@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
30945 }
30946
30947 for (i = 0; i < dev_priv->num_irqs; ++i) {
30948- atomic_set(&cur_irq->irq_received, 0);
30949+ atomic_set_unchecked(&cur_irq->irq_received, 0);
30950 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30951 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30952 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30953@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
30954 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30955 case VIA_IRQ_RELATIVE:
30956 irqwait->request.sequence +=
30957- atomic_read(&cur_irq->irq_received);
30958+ atomic_read_unchecked(&cur_irq->irq_received);
30959 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30960 case VIA_IRQ_ABSOLUTE:
30961 break;
30962diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30963index dc27970..f18b008 100644
30964--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30965+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
30966@@ -260,7 +260,7 @@ struct vmw_private {
30967 * Fencing and IRQs.
30968 */
30969
30970- atomic_t marker_seq;
30971+ atomic_unchecked_t marker_seq;
30972 wait_queue_head_t fence_queue;
30973 wait_queue_head_t fifo_queue;
30974 int fence_queue_waiters; /* Protected by hw_mutex */
30975diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30976index a0c2f12..68ae6cb 100644
30977--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30978+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
30979@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
30980 (unsigned int) min,
30981 (unsigned int) fifo->capabilities);
30982
30983- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
30984+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
30985 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
30986 vmw_marker_queue_init(&fifo->marker_queue);
30987 return vmw_fifo_send_fence(dev_priv, &dummy);
30988@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
30989 if (reserveable)
30990 iowrite32(bytes, fifo_mem +
30991 SVGA_FIFO_RESERVED);
30992- return fifo_mem + (next_cmd >> 2);
30993+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
30994 } else {
30995 need_bounce = true;
30996 }
30997@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
30998
30999 fm = vmw_fifo_reserve(dev_priv, bytes);
31000 if (unlikely(fm == NULL)) {
31001- *seqno = atomic_read(&dev_priv->marker_seq);
31002+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31003 ret = -ENOMEM;
31004 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31005 false, 3*HZ);
31006@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31007 }
31008
31009 do {
31010- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31011+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31012 } while (*seqno == 0);
31013
31014 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31015diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31016index cabc95f..14b3d77 100644
31017--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31018+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31019@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31020 * emitted. Then the fence is stale and signaled.
31021 */
31022
31023- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31024+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31025 > VMW_FENCE_WRAP);
31026
31027 return ret;
31028@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31029
31030 if (fifo_idle)
31031 down_read(&fifo_state->rwsem);
31032- signal_seq = atomic_read(&dev_priv->marker_seq);
31033+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31034 ret = 0;
31035
31036 for (;;) {
31037diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31038index 8a8725c..afed796 100644
31039--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31040+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31041@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31042 while (!vmw_lag_lt(queue, us)) {
31043 spin_lock(&queue->lock);
31044 if (list_empty(&queue->head))
31045- seqno = atomic_read(&dev_priv->marker_seq);
31046+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31047 else {
31048 marker = list_first_entry(&queue->head,
31049 struct vmw_marker, head);
31050diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31051index 75dbe34..f9204a8 100644
31052--- a/drivers/hid/hid-core.c
31053+++ b/drivers/hid/hid-core.c
31054@@ -2021,7 +2021,7 @@ static bool hid_ignore(struct hid_device *hdev)
31055
31056 int hid_add_device(struct hid_device *hdev)
31057 {
31058- static atomic_t id = ATOMIC_INIT(0);
31059+ static atomic_unchecked_t id = ATOMIC_INIT(0);
31060 int ret;
31061
31062 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31063@@ -2036,7 +2036,7 @@ int hid_add_device(struct hid_device *hdev)
31064 /* XXX hack, any other cleaner solution after the driver core
31065 * is converted to allow more than 20 bytes as the device name? */
31066 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31067- hdev->vendor, hdev->product, atomic_inc_return(&id));
31068+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31069
31070 hid_debug_register(hdev, dev_name(&hdev->dev));
31071 ret = device_add(&hdev->dev);
31072diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31073index b1ec0e2..c295a61 100644
31074--- a/drivers/hid/usbhid/hiddev.c
31075+++ b/drivers/hid/usbhid/hiddev.c
31076@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31077 break;
31078
31079 case HIDIOCAPPLICATION:
31080- if (arg < 0 || arg >= hid->maxapplication)
31081+ if (arg >= hid->maxapplication)
31082 break;
31083
31084 for (i = 0; i < hid->maxcollection; i++)
31085diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31086index 4065374..10ed7dc 100644
31087--- a/drivers/hv/channel.c
31088+++ b/drivers/hv/channel.c
31089@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31090 int ret = 0;
31091 int t;
31092
31093- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31094- atomic_inc(&vmbus_connection.next_gpadl_handle);
31095+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31096+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31097
31098 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31099 if (ret)
31100diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31101index 12aa97f..c0679f7 100644
31102--- a/drivers/hv/hv.c
31103+++ b/drivers/hv/hv.c
31104@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31105 u64 output_address = (output) ? virt_to_phys(output) : 0;
31106 u32 output_address_hi = output_address >> 32;
31107 u32 output_address_lo = output_address & 0xFFFFFFFF;
31108- void *hypercall_page = hv_context.hypercall_page;
31109+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31110
31111 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31112 "=a"(hv_status_lo) : "d" (control_hi),
31113diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31114index 6d7d286..92b0873 100644
31115--- a/drivers/hv/hyperv_vmbus.h
31116+++ b/drivers/hv/hyperv_vmbus.h
31117@@ -556,7 +556,7 @@ enum vmbus_connect_state {
31118 struct vmbus_connection {
31119 enum vmbus_connect_state conn_state;
31120
31121- atomic_t next_gpadl_handle;
31122+ atomic_unchecked_t next_gpadl_handle;
31123
31124 /*
31125 * Represents channel interrupts. Each bit position represents a
31126diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31127index a220e57..428f54d 100644
31128--- a/drivers/hv/vmbus_drv.c
31129+++ b/drivers/hv/vmbus_drv.c
31130@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31131 {
31132 int ret = 0;
31133
31134- static atomic_t device_num = ATOMIC_INIT(0);
31135+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31136
31137 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31138- atomic_inc_return(&device_num));
31139+ atomic_inc_return_unchecked(&device_num));
31140
31141 child_device_obj->device.bus = &hv_bus;
31142 child_device_obj->device.parent = &hv_acpi_dev->dev;
31143diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31144index 554f046..f8b4729 100644
31145--- a/drivers/hwmon/acpi_power_meter.c
31146+++ b/drivers/hwmon/acpi_power_meter.c
31147@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31148 return res;
31149
31150 temp /= 1000;
31151- if (temp < 0)
31152- return -EINVAL;
31153
31154 mutex_lock(&resource->lock);
31155 resource->trip[attr->index - 7] = temp;
31156diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31157index 91fdd1f..b66a686 100644
31158--- a/drivers/hwmon/sht15.c
31159+++ b/drivers/hwmon/sht15.c
31160@@ -166,7 +166,7 @@ struct sht15_data {
31161 int supply_uV;
31162 bool supply_uV_valid;
31163 struct work_struct update_supply_work;
31164- atomic_t interrupt_handled;
31165+ atomic_unchecked_t interrupt_handled;
31166 };
31167
31168 /**
31169@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31170 return ret;
31171
31172 gpio_direction_input(data->pdata->gpio_data);
31173- atomic_set(&data->interrupt_handled, 0);
31174+ atomic_set_unchecked(&data->interrupt_handled, 0);
31175
31176 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31177 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31178 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31179 /* Only relevant if the interrupt hasn't occurred. */
31180- if (!atomic_read(&data->interrupt_handled))
31181+ if (!atomic_read_unchecked(&data->interrupt_handled))
31182 schedule_work(&data->read_work);
31183 }
31184 ret = wait_event_timeout(data->wait_queue,
31185@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31186
31187 /* First disable the interrupt */
31188 disable_irq_nosync(irq);
31189- atomic_inc(&data->interrupt_handled);
31190+ atomic_inc_unchecked(&data->interrupt_handled);
31191 /* Then schedule a reading work struct */
31192 if (data->state != SHT15_READING_NOTHING)
31193 schedule_work(&data->read_work);
31194@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31195 * If not, then start the interrupt again - care here as could
31196 * have gone low in meantime so verify it hasn't!
31197 */
31198- atomic_set(&data->interrupt_handled, 0);
31199+ atomic_set_unchecked(&data->interrupt_handled, 0);
31200 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31201 /* If still not occurred or another handler has been scheduled */
31202 if (gpio_get_value(data->pdata->gpio_data)
31203- || atomic_read(&data->interrupt_handled))
31204+ || atomic_read_unchecked(&data->interrupt_handled))
31205 return;
31206 }
31207
31208diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31209index 378fcb5..5e91fa8 100644
31210--- a/drivers/i2c/busses/i2c-amd756-s4882.c
31211+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31212@@ -43,7 +43,7 @@
31213 extern struct i2c_adapter amd756_smbus;
31214
31215 static struct i2c_adapter *s4882_adapter;
31216-static struct i2c_algorithm *s4882_algo;
31217+static i2c_algorithm_no_const *s4882_algo;
31218
31219 /* Wrapper access functions for multiplexed SMBus */
31220 static DEFINE_MUTEX(amd756_lock);
31221diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31222index 29015eb..af2d8e9 100644
31223--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31224+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31225@@ -41,7 +41,7 @@
31226 extern struct i2c_adapter *nforce2_smbus;
31227
31228 static struct i2c_adapter *s4985_adapter;
31229-static struct i2c_algorithm *s4985_algo;
31230+static i2c_algorithm_no_const *s4985_algo;
31231
31232 /* Wrapper access functions for multiplexed SMBus */
31233 static DEFINE_MUTEX(nforce2_lock);
31234diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31235index d7a4833..7fae376 100644
31236--- a/drivers/i2c/i2c-mux.c
31237+++ b/drivers/i2c/i2c-mux.c
31238@@ -28,7 +28,7 @@
31239 /* multiplexer per channel data */
31240 struct i2c_mux_priv {
31241 struct i2c_adapter adap;
31242- struct i2c_algorithm algo;
31243+ i2c_algorithm_no_const algo;
31244
31245 struct i2c_adapter *parent;
31246 void *mux_dev; /* the mux chip/device */
31247diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31248index 57d00ca..0145194 100644
31249--- a/drivers/ide/aec62xx.c
31250+++ b/drivers/ide/aec62xx.c
31251@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31252 .cable_detect = atp86x_cable_detect,
31253 };
31254
31255-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31256+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31257 { /* 0: AEC6210 */
31258 .name = DRV_NAME,
31259 .init_chipset = init_chipset_aec62xx,
31260diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31261index 2c8016a..911a27c 100644
31262--- a/drivers/ide/alim15x3.c
31263+++ b/drivers/ide/alim15x3.c
31264@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31265 .dma_sff_read_status = ide_dma_sff_read_status,
31266 };
31267
31268-static const struct ide_port_info ali15x3_chipset __devinitdata = {
31269+static const struct ide_port_info ali15x3_chipset __devinitconst = {
31270 .name = DRV_NAME,
31271 .init_chipset = init_chipset_ali15x3,
31272 .init_hwif = init_hwif_ali15x3,
31273diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31274index 3747b25..56fc995 100644
31275--- a/drivers/ide/amd74xx.c
31276+++ b/drivers/ide/amd74xx.c
31277@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31278 .udma_mask = udma, \
31279 }
31280
31281-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31282+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31283 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31284 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31285 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31286diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31287index 15f0ead..cb43480 100644
31288--- a/drivers/ide/atiixp.c
31289+++ b/drivers/ide/atiixp.c
31290@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31291 .cable_detect = atiixp_cable_detect,
31292 };
31293
31294-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31295+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31296 { /* 0: IXP200/300/400/700 */
31297 .name = DRV_NAME,
31298 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31299diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31300index 5f80312..d1fc438 100644
31301--- a/drivers/ide/cmd64x.c
31302+++ b/drivers/ide/cmd64x.c
31303@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31304 .dma_sff_read_status = ide_dma_sff_read_status,
31305 };
31306
31307-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31308+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31309 { /* 0: CMD643 */
31310 .name = DRV_NAME,
31311 .init_chipset = init_chipset_cmd64x,
31312diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31313index 2c1e5f7..1444762 100644
31314--- a/drivers/ide/cs5520.c
31315+++ b/drivers/ide/cs5520.c
31316@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31317 .set_dma_mode = cs5520_set_dma_mode,
31318 };
31319
31320-static const struct ide_port_info cyrix_chipset __devinitdata = {
31321+static const struct ide_port_info cyrix_chipset __devinitconst = {
31322 .name = DRV_NAME,
31323 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31324 .port_ops = &cs5520_port_ops,
31325diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31326index 4dc4eb9..49b40ad 100644
31327--- a/drivers/ide/cs5530.c
31328+++ b/drivers/ide/cs5530.c
31329@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31330 .udma_filter = cs5530_udma_filter,
31331 };
31332
31333-static const struct ide_port_info cs5530_chipset __devinitdata = {
31334+static const struct ide_port_info cs5530_chipset __devinitconst = {
31335 .name = DRV_NAME,
31336 .init_chipset = init_chipset_cs5530,
31337 .init_hwif = init_hwif_cs5530,
31338diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31339index 5059faf..18d4c85 100644
31340--- a/drivers/ide/cs5535.c
31341+++ b/drivers/ide/cs5535.c
31342@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31343 .cable_detect = cs5535_cable_detect,
31344 };
31345
31346-static const struct ide_port_info cs5535_chipset __devinitdata = {
31347+static const struct ide_port_info cs5535_chipset __devinitconst = {
31348 .name = DRV_NAME,
31349 .port_ops = &cs5535_port_ops,
31350 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31351diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31352index 847553f..3ffb49d 100644
31353--- a/drivers/ide/cy82c693.c
31354+++ b/drivers/ide/cy82c693.c
31355@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31356 .set_dma_mode = cy82c693_set_dma_mode,
31357 };
31358
31359-static const struct ide_port_info cy82c693_chipset __devinitdata = {
31360+static const struct ide_port_info cy82c693_chipset __devinitconst = {
31361 .name = DRV_NAME,
31362 .init_iops = init_iops_cy82c693,
31363 .port_ops = &cy82c693_port_ops,
31364diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31365index 58c51cd..4aec3b8 100644
31366--- a/drivers/ide/hpt366.c
31367+++ b/drivers/ide/hpt366.c
31368@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31369 }
31370 };
31371
31372-static const struct hpt_info hpt36x __devinitdata = {
31373+static const struct hpt_info hpt36x __devinitconst = {
31374 .chip_name = "HPT36x",
31375 .chip_type = HPT36x,
31376 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31377@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31378 .timings = &hpt36x_timings
31379 };
31380
31381-static const struct hpt_info hpt370 __devinitdata = {
31382+static const struct hpt_info hpt370 __devinitconst = {
31383 .chip_name = "HPT370",
31384 .chip_type = HPT370,
31385 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31386@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31387 .timings = &hpt37x_timings
31388 };
31389
31390-static const struct hpt_info hpt370a __devinitdata = {
31391+static const struct hpt_info hpt370a __devinitconst = {
31392 .chip_name = "HPT370A",
31393 .chip_type = HPT370A,
31394 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31395@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31396 .timings = &hpt37x_timings
31397 };
31398
31399-static const struct hpt_info hpt374 __devinitdata = {
31400+static const struct hpt_info hpt374 __devinitconst = {
31401 .chip_name = "HPT374",
31402 .chip_type = HPT374,
31403 .udma_mask = ATA_UDMA5,
31404@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31405 .timings = &hpt37x_timings
31406 };
31407
31408-static const struct hpt_info hpt372 __devinitdata = {
31409+static const struct hpt_info hpt372 __devinitconst = {
31410 .chip_name = "HPT372",
31411 .chip_type = HPT372,
31412 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31413@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31414 .timings = &hpt37x_timings
31415 };
31416
31417-static const struct hpt_info hpt372a __devinitdata = {
31418+static const struct hpt_info hpt372a __devinitconst = {
31419 .chip_name = "HPT372A",
31420 .chip_type = HPT372A,
31421 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31422@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31423 .timings = &hpt37x_timings
31424 };
31425
31426-static const struct hpt_info hpt302 __devinitdata = {
31427+static const struct hpt_info hpt302 __devinitconst = {
31428 .chip_name = "HPT302",
31429 .chip_type = HPT302,
31430 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31431@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31432 .timings = &hpt37x_timings
31433 };
31434
31435-static const struct hpt_info hpt371 __devinitdata = {
31436+static const struct hpt_info hpt371 __devinitconst = {
31437 .chip_name = "HPT371",
31438 .chip_type = HPT371,
31439 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31440@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31441 .timings = &hpt37x_timings
31442 };
31443
31444-static const struct hpt_info hpt372n __devinitdata = {
31445+static const struct hpt_info hpt372n __devinitconst = {
31446 .chip_name = "HPT372N",
31447 .chip_type = HPT372N,
31448 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31449@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31450 .timings = &hpt37x_timings
31451 };
31452
31453-static const struct hpt_info hpt302n __devinitdata = {
31454+static const struct hpt_info hpt302n __devinitconst = {
31455 .chip_name = "HPT302N",
31456 .chip_type = HPT302N,
31457 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31458@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31459 .timings = &hpt37x_timings
31460 };
31461
31462-static const struct hpt_info hpt371n __devinitdata = {
31463+static const struct hpt_info hpt371n __devinitconst = {
31464 .chip_name = "HPT371N",
31465 .chip_type = HPT371N,
31466 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31467@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31468 .dma_sff_read_status = ide_dma_sff_read_status,
31469 };
31470
31471-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31472+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31473 { /* 0: HPT36x */
31474 .name = DRV_NAME,
31475 .init_chipset = init_chipset_hpt366,
31476diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31477index 8126824..55a2798 100644
31478--- a/drivers/ide/ide-cd.c
31479+++ b/drivers/ide/ide-cd.c
31480@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31481 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31482 if ((unsigned long)buf & alignment
31483 || blk_rq_bytes(rq) & q->dma_pad_mask
31484- || object_is_on_stack(buf))
31485+ || object_starts_on_stack(buf))
31486 drive->dma = 0;
31487 }
31488 }
31489diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31490index 7f56b73..dab5b67 100644
31491--- a/drivers/ide/ide-pci-generic.c
31492+++ b/drivers/ide/ide-pci-generic.c
31493@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31494 .udma_mask = ATA_UDMA6, \
31495 }
31496
31497-static const struct ide_port_info generic_chipsets[] __devinitdata = {
31498+static const struct ide_port_info generic_chipsets[] __devinitconst = {
31499 /* 0: Unknown */
31500 DECLARE_GENERIC_PCI_DEV(0),
31501
31502diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31503index 560e66d..d5dd180 100644
31504--- a/drivers/ide/it8172.c
31505+++ b/drivers/ide/it8172.c
31506@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31507 .set_dma_mode = it8172_set_dma_mode,
31508 };
31509
31510-static const struct ide_port_info it8172_port_info __devinitdata = {
31511+static const struct ide_port_info it8172_port_info __devinitconst = {
31512 .name = DRV_NAME,
31513 .port_ops = &it8172_port_ops,
31514 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31515diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31516index 46816ba..1847aeb 100644
31517--- a/drivers/ide/it8213.c
31518+++ b/drivers/ide/it8213.c
31519@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31520 .cable_detect = it8213_cable_detect,
31521 };
31522
31523-static const struct ide_port_info it8213_chipset __devinitdata = {
31524+static const struct ide_port_info it8213_chipset __devinitconst = {
31525 .name = DRV_NAME,
31526 .enablebits = { {0x41, 0x80, 0x80} },
31527 .port_ops = &it8213_port_ops,
31528diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31529index 2e3169f..c5611db 100644
31530--- a/drivers/ide/it821x.c
31531+++ b/drivers/ide/it821x.c
31532@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31533 .cable_detect = it821x_cable_detect,
31534 };
31535
31536-static const struct ide_port_info it821x_chipset __devinitdata = {
31537+static const struct ide_port_info it821x_chipset __devinitconst = {
31538 .name = DRV_NAME,
31539 .init_chipset = init_chipset_it821x,
31540 .init_hwif = init_hwif_it821x,
31541diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31542index 74c2c4a..efddd7d 100644
31543--- a/drivers/ide/jmicron.c
31544+++ b/drivers/ide/jmicron.c
31545@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31546 .cable_detect = jmicron_cable_detect,
31547 };
31548
31549-static const struct ide_port_info jmicron_chipset __devinitdata = {
31550+static const struct ide_port_info jmicron_chipset __devinitconst = {
31551 .name = DRV_NAME,
31552 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31553 .port_ops = &jmicron_port_ops,
31554diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31555index 95327a2..73f78d8 100644
31556--- a/drivers/ide/ns87415.c
31557+++ b/drivers/ide/ns87415.c
31558@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31559 .dma_sff_read_status = superio_dma_sff_read_status,
31560 };
31561
31562-static const struct ide_port_info ns87415_chipset __devinitdata = {
31563+static const struct ide_port_info ns87415_chipset __devinitconst = {
31564 .name = DRV_NAME,
31565 .init_hwif = init_hwif_ns87415,
31566 .tp_ops = &ns87415_tp_ops,
31567diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31568index 1a53a4c..39edc66 100644
31569--- a/drivers/ide/opti621.c
31570+++ b/drivers/ide/opti621.c
31571@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31572 .set_pio_mode = opti621_set_pio_mode,
31573 };
31574
31575-static const struct ide_port_info opti621_chipset __devinitdata = {
31576+static const struct ide_port_info opti621_chipset __devinitconst = {
31577 .name = DRV_NAME,
31578 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31579 .port_ops = &opti621_port_ops,
31580diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31581index 9546fe2..2e5ceb6 100644
31582--- a/drivers/ide/pdc202xx_new.c
31583+++ b/drivers/ide/pdc202xx_new.c
31584@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31585 .udma_mask = udma, \
31586 }
31587
31588-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31589+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31590 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31591 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31592 };
31593diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31594index 3a35ec6..5634510 100644
31595--- a/drivers/ide/pdc202xx_old.c
31596+++ b/drivers/ide/pdc202xx_old.c
31597@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31598 .max_sectors = sectors, \
31599 }
31600
31601-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31602+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31603 { /* 0: PDC20246 */
31604 .name = DRV_NAME,
31605 .init_chipset = init_chipset_pdc202xx,
31606diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31607index 1892e81..fe0fd60 100644
31608--- a/drivers/ide/piix.c
31609+++ b/drivers/ide/piix.c
31610@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31611 .udma_mask = udma, \
31612 }
31613
31614-static const struct ide_port_info piix_pci_info[] __devinitdata = {
31615+static const struct ide_port_info piix_pci_info[] __devinitconst = {
31616 /* 0: MPIIX */
31617 { /*
31618 * MPIIX actually has only a single IDE channel mapped to
31619diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31620index a6414a8..c04173e 100644
31621--- a/drivers/ide/rz1000.c
31622+++ b/drivers/ide/rz1000.c
31623@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31624 }
31625 }
31626
31627-static const struct ide_port_info rz1000_chipset __devinitdata = {
31628+static const struct ide_port_info rz1000_chipset __devinitconst = {
31629 .name = DRV_NAME,
31630 .host_flags = IDE_HFLAG_NO_DMA,
31631 };
31632diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31633index 356b9b5..d4758eb 100644
31634--- a/drivers/ide/sc1200.c
31635+++ b/drivers/ide/sc1200.c
31636@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31637 .dma_sff_read_status = ide_dma_sff_read_status,
31638 };
31639
31640-static const struct ide_port_info sc1200_chipset __devinitdata = {
31641+static const struct ide_port_info sc1200_chipset __devinitconst = {
31642 .name = DRV_NAME,
31643 .port_ops = &sc1200_port_ops,
31644 .dma_ops = &sc1200_dma_ops,
31645diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31646index b7f5b0c..9701038 100644
31647--- a/drivers/ide/scc_pata.c
31648+++ b/drivers/ide/scc_pata.c
31649@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31650 .dma_sff_read_status = scc_dma_sff_read_status,
31651 };
31652
31653-static const struct ide_port_info scc_chipset __devinitdata = {
31654+static const struct ide_port_info scc_chipset __devinitconst = {
31655 .name = "sccIDE",
31656 .init_iops = init_iops_scc,
31657 .init_dma = scc_init_dma,
31658diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31659index 35fb8da..24d72ef 100644
31660--- a/drivers/ide/serverworks.c
31661+++ b/drivers/ide/serverworks.c
31662@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31663 .cable_detect = svwks_cable_detect,
31664 };
31665
31666-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31667+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31668 { /* 0: OSB4 */
31669 .name = DRV_NAME,
31670 .init_chipset = init_chipset_svwks,
31671diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31672index ddeda44..46f7e30 100644
31673--- a/drivers/ide/siimage.c
31674+++ b/drivers/ide/siimage.c
31675@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31676 .udma_mask = ATA_UDMA6, \
31677 }
31678
31679-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31680+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31681 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31682 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31683 };
31684diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31685index 4a00225..09e61b4 100644
31686--- a/drivers/ide/sis5513.c
31687+++ b/drivers/ide/sis5513.c
31688@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31689 .cable_detect = sis_cable_detect,
31690 };
31691
31692-static const struct ide_port_info sis5513_chipset __devinitdata = {
31693+static const struct ide_port_info sis5513_chipset __devinitconst = {
31694 .name = DRV_NAME,
31695 .init_chipset = init_chipset_sis5513,
31696 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31697diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31698index f21dc2a..d051cd2 100644
31699--- a/drivers/ide/sl82c105.c
31700+++ b/drivers/ide/sl82c105.c
31701@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31702 .dma_sff_read_status = ide_dma_sff_read_status,
31703 };
31704
31705-static const struct ide_port_info sl82c105_chipset __devinitdata = {
31706+static const struct ide_port_info sl82c105_chipset __devinitconst = {
31707 .name = DRV_NAME,
31708 .init_chipset = init_chipset_sl82c105,
31709 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31710diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31711index 864ffe0..863a5e9 100644
31712--- a/drivers/ide/slc90e66.c
31713+++ b/drivers/ide/slc90e66.c
31714@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31715 .cable_detect = slc90e66_cable_detect,
31716 };
31717
31718-static const struct ide_port_info slc90e66_chipset __devinitdata = {
31719+static const struct ide_port_info slc90e66_chipset __devinitconst = {
31720 .name = DRV_NAME,
31721 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31722 .port_ops = &slc90e66_port_ops,
31723diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31724index 4799d5c..1794678 100644
31725--- a/drivers/ide/tc86c001.c
31726+++ b/drivers/ide/tc86c001.c
31727@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31728 .dma_sff_read_status = ide_dma_sff_read_status,
31729 };
31730
31731-static const struct ide_port_info tc86c001_chipset __devinitdata = {
31732+static const struct ide_port_info tc86c001_chipset __devinitconst = {
31733 .name = DRV_NAME,
31734 .init_hwif = init_hwif_tc86c001,
31735 .port_ops = &tc86c001_port_ops,
31736diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31737index 281c914..55ce1b8 100644
31738--- a/drivers/ide/triflex.c
31739+++ b/drivers/ide/triflex.c
31740@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31741 .set_dma_mode = triflex_set_mode,
31742 };
31743
31744-static const struct ide_port_info triflex_device __devinitdata = {
31745+static const struct ide_port_info triflex_device __devinitconst = {
31746 .name = DRV_NAME,
31747 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31748 .port_ops = &triflex_port_ops,
31749diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31750index 4b42ca0..e494a98 100644
31751--- a/drivers/ide/trm290.c
31752+++ b/drivers/ide/trm290.c
31753@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31754 .dma_check = trm290_dma_check,
31755 };
31756
31757-static const struct ide_port_info trm290_chipset __devinitdata = {
31758+static const struct ide_port_info trm290_chipset __devinitconst = {
31759 .name = DRV_NAME,
31760 .init_hwif = init_hwif_trm290,
31761 .tp_ops = &trm290_tp_ops,
31762diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31763index f46f49c..eb77678 100644
31764--- a/drivers/ide/via82cxxx.c
31765+++ b/drivers/ide/via82cxxx.c
31766@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31767 .cable_detect = via82cxxx_cable_detect,
31768 };
31769
31770-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31771+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31772 .name = DRV_NAME,
31773 .init_chipset = init_chipset_via82cxxx,
31774 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31775diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31776index 73d4531..c90cd2d 100644
31777--- a/drivers/ieee802154/fakehard.c
31778+++ b/drivers/ieee802154/fakehard.c
31779@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31780 phy->transmit_power = 0xbf;
31781
31782 dev->netdev_ops = &fake_ops;
31783- dev->ml_priv = &fake_mlme;
31784+ dev->ml_priv = (void *)&fake_mlme;
31785
31786 priv = netdev_priv(dev);
31787 priv->phy = phy;
31788diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
31789index c889aae..6cf5aa7 100644
31790--- a/drivers/infiniband/core/cm.c
31791+++ b/drivers/infiniband/core/cm.c
31792@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
31793
31794 struct cm_counter_group {
31795 struct kobject obj;
31796- atomic_long_t counter[CM_ATTR_COUNT];
31797+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
31798 };
31799
31800 struct cm_counter_attribute {
31801@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
31802 struct ib_mad_send_buf *msg = NULL;
31803 int ret;
31804
31805- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31806+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31807 counter[CM_REQ_COUNTER]);
31808
31809 /* Quick state check to discard duplicate REQs. */
31810@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
31811 if (!cm_id_priv)
31812 return;
31813
31814- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31815+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31816 counter[CM_REP_COUNTER]);
31817 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
31818 if (ret)
31819@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
31820 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
31821 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
31822 spin_unlock_irq(&cm_id_priv->lock);
31823- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31824+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31825 counter[CM_RTU_COUNTER]);
31826 goto out;
31827 }
31828@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
31829 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
31830 dreq_msg->local_comm_id);
31831 if (!cm_id_priv) {
31832- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31833+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31834 counter[CM_DREQ_COUNTER]);
31835 cm_issue_drep(work->port, work->mad_recv_wc);
31836 return -EINVAL;
31837@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
31838 case IB_CM_MRA_REP_RCVD:
31839 break;
31840 case IB_CM_TIMEWAIT:
31841- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31842+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31843 counter[CM_DREQ_COUNTER]);
31844 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31845 goto unlock;
31846@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
31847 cm_free_msg(msg);
31848 goto deref;
31849 case IB_CM_DREQ_RCVD:
31850- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31851+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31852 counter[CM_DREQ_COUNTER]);
31853 goto unlock;
31854 default:
31855@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
31856 ib_modify_mad(cm_id_priv->av.port->mad_agent,
31857 cm_id_priv->msg, timeout)) {
31858 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
31859- atomic_long_inc(&work->port->
31860+ atomic_long_inc_unchecked(&work->port->
31861 counter_group[CM_RECV_DUPLICATES].
31862 counter[CM_MRA_COUNTER]);
31863 goto out;
31864@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
31865 break;
31866 case IB_CM_MRA_REQ_RCVD:
31867 case IB_CM_MRA_REP_RCVD:
31868- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31869+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31870 counter[CM_MRA_COUNTER]);
31871 /* fall through */
31872 default:
31873@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
31874 case IB_CM_LAP_IDLE:
31875 break;
31876 case IB_CM_MRA_LAP_SENT:
31877- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31878+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31879 counter[CM_LAP_COUNTER]);
31880 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
31881 goto unlock;
31882@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
31883 cm_free_msg(msg);
31884 goto deref;
31885 case IB_CM_LAP_RCVD:
31886- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31887+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31888 counter[CM_LAP_COUNTER]);
31889 goto unlock;
31890 default:
31891@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
31892 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
31893 if (cur_cm_id_priv) {
31894 spin_unlock_irq(&cm.lock);
31895- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
31896+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
31897 counter[CM_SIDR_REQ_COUNTER]);
31898 goto out; /* Duplicate message. */
31899 }
31900@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
31901 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
31902 msg->retries = 1;
31903
31904- atomic_long_add(1 + msg->retries,
31905+ atomic_long_add_unchecked(1 + msg->retries,
31906 &port->counter_group[CM_XMIT].counter[attr_index]);
31907 if (msg->retries)
31908- atomic_long_add(msg->retries,
31909+ atomic_long_add_unchecked(msg->retries,
31910 &port->counter_group[CM_XMIT_RETRIES].
31911 counter[attr_index]);
31912
31913@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
31914 }
31915
31916 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
31917- atomic_long_inc(&port->counter_group[CM_RECV].
31918+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
31919 counter[attr_id - CM_ATTR_ID_OFFSET]);
31920
31921 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
31922@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
31923 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
31924
31925 return sprintf(buf, "%ld\n",
31926- atomic_long_read(&group->counter[cm_attr->index]));
31927+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
31928 }
31929
31930 static const struct sysfs_ops cm_counter_ops = {
31931diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
31932index 176c8f9..2627b62 100644
31933--- a/drivers/infiniband/core/fmr_pool.c
31934+++ b/drivers/infiniband/core/fmr_pool.c
31935@@ -98,8 +98,8 @@ struct ib_fmr_pool {
31936
31937 struct task_struct *thread;
31938
31939- atomic_t req_ser;
31940- atomic_t flush_ser;
31941+ atomic_unchecked_t req_ser;
31942+ atomic_unchecked_t flush_ser;
31943
31944 wait_queue_head_t force_wait;
31945 };
31946@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
31947 struct ib_fmr_pool *pool = pool_ptr;
31948
31949 do {
31950- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
31951+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
31952 ib_fmr_batch_release(pool);
31953
31954- atomic_inc(&pool->flush_ser);
31955+ atomic_inc_unchecked(&pool->flush_ser);
31956 wake_up_interruptible(&pool->force_wait);
31957
31958 if (pool->flush_function)
31959@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
31960 }
31961
31962 set_current_state(TASK_INTERRUPTIBLE);
31963- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
31964+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
31965 !kthread_should_stop())
31966 schedule();
31967 __set_current_state(TASK_RUNNING);
31968@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
31969 pool->dirty_watermark = params->dirty_watermark;
31970 pool->dirty_len = 0;
31971 spin_lock_init(&pool->pool_lock);
31972- atomic_set(&pool->req_ser, 0);
31973- atomic_set(&pool->flush_ser, 0);
31974+ atomic_set_unchecked(&pool->req_ser, 0);
31975+ atomic_set_unchecked(&pool->flush_ser, 0);
31976 init_waitqueue_head(&pool->force_wait);
31977
31978 pool->thread = kthread_run(ib_fmr_cleanup_thread,
31979@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
31980 }
31981 spin_unlock_irq(&pool->pool_lock);
31982
31983- serial = atomic_inc_return(&pool->req_ser);
31984+ serial = atomic_inc_return_unchecked(&pool->req_ser);
31985 wake_up_process(pool->thread);
31986
31987 if (wait_event_interruptible(pool->force_wait,
31988- atomic_read(&pool->flush_ser) - serial >= 0))
31989+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
31990 return -EINTR;
31991
31992 return 0;
31993@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
31994 } else {
31995 list_add_tail(&fmr->list, &pool->dirty_list);
31996 if (++pool->dirty_len >= pool->dirty_watermark) {
31997- atomic_inc(&pool->req_ser);
31998+ atomic_inc_unchecked(&pool->req_ser);
31999 wake_up_process(pool->thread);
32000 }
32001 }
32002diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32003index 40c8353..946b0e4 100644
32004--- a/drivers/infiniband/hw/cxgb4/mem.c
32005+++ b/drivers/infiniband/hw/cxgb4/mem.c
32006@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32007 int err;
32008 struct fw_ri_tpte tpt;
32009 u32 stag_idx;
32010- static atomic_t key;
32011+ static atomic_unchecked_t key;
32012
32013 if (c4iw_fatal_error(rdev))
32014 return -EIO;
32015@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32016 &rdev->resource.tpt_fifo_lock);
32017 if (!stag_idx)
32018 return -ENOMEM;
32019- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32020+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32021 }
32022 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32023 __func__, stag_state, type, pdid, stag_idx);
32024diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
32025index a4de9d5..5fa20c3 100644
32026--- a/drivers/infiniband/hw/ipath/ipath_fs.c
32027+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
32028@@ -126,6 +126,8 @@ static const struct file_operations atomic_counters_ops = {
32029 };
32030
32031 static ssize_t flash_read(struct file *file, char __user *buf,
32032+ size_t count, loff_t *ppos) __size_overflow(3);
32033+static ssize_t flash_read(struct file *file, char __user *buf,
32034 size_t count, loff_t *ppos)
32035 {
32036 struct ipath_devdata *dd;
32037@@ -177,6 +179,8 @@ bail:
32038 }
32039
32040 static ssize_t flash_write(struct file *file, const char __user *buf,
32041+ size_t count, loff_t *ppos) __size_overflow(3);
32042+static ssize_t flash_write(struct file *file, const char __user *buf,
32043 size_t count, loff_t *ppos)
32044 {
32045 struct ipath_devdata *dd;
32046diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32047index 79b3dbc..96e5fcc 100644
32048--- a/drivers/infiniband/hw/ipath/ipath_rc.c
32049+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32050@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32051 struct ib_atomic_eth *ateth;
32052 struct ipath_ack_entry *e;
32053 u64 vaddr;
32054- atomic64_t *maddr;
32055+ atomic64_unchecked_t *maddr;
32056 u64 sdata;
32057 u32 rkey;
32058 u8 next;
32059@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32060 IB_ACCESS_REMOTE_ATOMIC)))
32061 goto nack_acc_unlck;
32062 /* Perform atomic OP and save result. */
32063- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32064+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32065 sdata = be64_to_cpu(ateth->swap_data);
32066 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32067 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32068- (u64) atomic64_add_return(sdata, maddr) - sdata :
32069+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32070 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32071 be64_to_cpu(ateth->compare_data),
32072 sdata);
32073diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32074index 1f95bba..9530f87 100644
32075--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32076+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32077@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32078 unsigned long flags;
32079 struct ib_wc wc;
32080 u64 sdata;
32081- atomic64_t *maddr;
32082+ atomic64_unchecked_t *maddr;
32083 enum ib_wc_status send_status;
32084
32085 /*
32086@@ -382,11 +382,11 @@ again:
32087 IB_ACCESS_REMOTE_ATOMIC)))
32088 goto acc_err;
32089 /* Perform atomic OP and save result. */
32090- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32091+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32092 sdata = wqe->wr.wr.atomic.compare_add;
32093 *(u64 *) sqp->s_sge.sge.vaddr =
32094 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32095- (u64) atomic64_add_return(sdata, maddr) - sdata :
32096+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32097 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32098 sdata, wqe->wr.wr.atomic.swap);
32099 goto send_comp;
32100diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32101index 7140199..da60063 100644
32102--- a/drivers/infiniband/hw/nes/nes.c
32103+++ b/drivers/infiniband/hw/nes/nes.c
32104@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32105 LIST_HEAD(nes_adapter_list);
32106 static LIST_HEAD(nes_dev_list);
32107
32108-atomic_t qps_destroyed;
32109+atomic_unchecked_t qps_destroyed;
32110
32111 static unsigned int ee_flsh_adapter;
32112 static unsigned int sysfs_nonidx_addr;
32113@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32114 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32115 struct nes_adapter *nesadapter = nesdev->nesadapter;
32116
32117- atomic_inc(&qps_destroyed);
32118+ atomic_inc_unchecked(&qps_destroyed);
32119
32120 /* Free the control structures */
32121
32122diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32123index c438e46..ca30356 100644
32124--- a/drivers/infiniband/hw/nes/nes.h
32125+++ b/drivers/infiniband/hw/nes/nes.h
32126@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32127 extern unsigned int wqm_quanta;
32128 extern struct list_head nes_adapter_list;
32129
32130-extern atomic_t cm_connects;
32131-extern atomic_t cm_accepts;
32132-extern atomic_t cm_disconnects;
32133-extern atomic_t cm_closes;
32134-extern atomic_t cm_connecteds;
32135-extern atomic_t cm_connect_reqs;
32136-extern atomic_t cm_rejects;
32137-extern atomic_t mod_qp_timouts;
32138-extern atomic_t qps_created;
32139-extern atomic_t qps_destroyed;
32140-extern atomic_t sw_qps_destroyed;
32141+extern atomic_unchecked_t cm_connects;
32142+extern atomic_unchecked_t cm_accepts;
32143+extern atomic_unchecked_t cm_disconnects;
32144+extern atomic_unchecked_t cm_closes;
32145+extern atomic_unchecked_t cm_connecteds;
32146+extern atomic_unchecked_t cm_connect_reqs;
32147+extern atomic_unchecked_t cm_rejects;
32148+extern atomic_unchecked_t mod_qp_timouts;
32149+extern atomic_unchecked_t qps_created;
32150+extern atomic_unchecked_t qps_destroyed;
32151+extern atomic_unchecked_t sw_qps_destroyed;
32152 extern u32 mh_detected;
32153 extern u32 mh_pauses_sent;
32154 extern u32 cm_packets_sent;
32155@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32156 extern u32 cm_packets_received;
32157 extern u32 cm_packets_dropped;
32158 extern u32 cm_packets_retrans;
32159-extern atomic_t cm_listens_created;
32160-extern atomic_t cm_listens_destroyed;
32161+extern atomic_unchecked_t cm_listens_created;
32162+extern atomic_unchecked_t cm_listens_destroyed;
32163 extern u32 cm_backlog_drops;
32164-extern atomic_t cm_loopbacks;
32165-extern atomic_t cm_nodes_created;
32166-extern atomic_t cm_nodes_destroyed;
32167-extern atomic_t cm_accel_dropped_pkts;
32168-extern atomic_t cm_resets_recvd;
32169-extern atomic_t pau_qps_created;
32170-extern atomic_t pau_qps_destroyed;
32171+extern atomic_unchecked_t cm_loopbacks;
32172+extern atomic_unchecked_t cm_nodes_created;
32173+extern atomic_unchecked_t cm_nodes_destroyed;
32174+extern atomic_unchecked_t cm_accel_dropped_pkts;
32175+extern atomic_unchecked_t cm_resets_recvd;
32176+extern atomic_unchecked_t pau_qps_created;
32177+extern atomic_unchecked_t pau_qps_destroyed;
32178
32179 extern u32 int_mod_timer_init;
32180 extern u32 int_mod_cq_depth_256;
32181diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32182index a4972ab..1bcfc31 100644
32183--- a/drivers/infiniband/hw/nes/nes_cm.c
32184+++ b/drivers/infiniband/hw/nes/nes_cm.c
32185@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32186 u32 cm_packets_retrans;
32187 u32 cm_packets_created;
32188 u32 cm_packets_received;
32189-atomic_t cm_listens_created;
32190-atomic_t cm_listens_destroyed;
32191+atomic_unchecked_t cm_listens_created;
32192+atomic_unchecked_t cm_listens_destroyed;
32193 u32 cm_backlog_drops;
32194-atomic_t cm_loopbacks;
32195-atomic_t cm_nodes_created;
32196-atomic_t cm_nodes_destroyed;
32197-atomic_t cm_accel_dropped_pkts;
32198-atomic_t cm_resets_recvd;
32199+atomic_unchecked_t cm_loopbacks;
32200+atomic_unchecked_t cm_nodes_created;
32201+atomic_unchecked_t cm_nodes_destroyed;
32202+atomic_unchecked_t cm_accel_dropped_pkts;
32203+atomic_unchecked_t cm_resets_recvd;
32204
32205 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32206 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32207@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32208
32209 static struct nes_cm_core *g_cm_core;
32210
32211-atomic_t cm_connects;
32212-atomic_t cm_accepts;
32213-atomic_t cm_disconnects;
32214-atomic_t cm_closes;
32215-atomic_t cm_connecteds;
32216-atomic_t cm_connect_reqs;
32217-atomic_t cm_rejects;
32218+atomic_unchecked_t cm_connects;
32219+atomic_unchecked_t cm_accepts;
32220+atomic_unchecked_t cm_disconnects;
32221+atomic_unchecked_t cm_closes;
32222+atomic_unchecked_t cm_connecteds;
32223+atomic_unchecked_t cm_connect_reqs;
32224+atomic_unchecked_t cm_rejects;
32225
32226 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32227 {
32228@@ -1274,7 +1274,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32229 kfree(listener);
32230 listener = NULL;
32231 ret = 0;
32232- atomic_inc(&cm_listens_destroyed);
32233+ atomic_inc_unchecked(&cm_listens_destroyed);
32234 } else {
32235 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32236 }
32237@@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32238 cm_node->rem_mac);
32239
32240 add_hte_node(cm_core, cm_node);
32241- atomic_inc(&cm_nodes_created);
32242+ atomic_inc_unchecked(&cm_nodes_created);
32243
32244 return cm_node;
32245 }
32246@@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32247 }
32248
32249 atomic_dec(&cm_core->node_cnt);
32250- atomic_inc(&cm_nodes_destroyed);
32251+ atomic_inc_unchecked(&cm_nodes_destroyed);
32252 nesqp = cm_node->nesqp;
32253 if (nesqp) {
32254 nesqp->cm_node = NULL;
32255@@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32256
32257 static void drop_packet(struct sk_buff *skb)
32258 {
32259- atomic_inc(&cm_accel_dropped_pkts);
32260+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
32261 dev_kfree_skb_any(skb);
32262 }
32263
32264@@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32265 {
32266
32267 int reset = 0; /* whether to send reset in case of err.. */
32268- atomic_inc(&cm_resets_recvd);
32269+ atomic_inc_unchecked(&cm_resets_recvd);
32270 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32271 " refcnt=%d\n", cm_node, cm_node->state,
32272 atomic_read(&cm_node->ref_count));
32273@@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32274 rem_ref_cm_node(cm_node->cm_core, cm_node);
32275 return NULL;
32276 }
32277- atomic_inc(&cm_loopbacks);
32278+ atomic_inc_unchecked(&cm_loopbacks);
32279 loopbackremotenode->loopbackpartner = cm_node;
32280 loopbackremotenode->tcp_cntxt.rcv_wscale =
32281 NES_CM_DEFAULT_RCV_WND_SCALE;
32282@@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32283 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32284 else {
32285 rem_ref_cm_node(cm_core, cm_node);
32286- atomic_inc(&cm_accel_dropped_pkts);
32287+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
32288 dev_kfree_skb_any(skb);
32289 }
32290 break;
32291@@ -2881,7 +2881,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32292
32293 if ((cm_id) && (cm_id->event_handler)) {
32294 if (issue_disconn) {
32295- atomic_inc(&cm_disconnects);
32296+ atomic_inc_unchecked(&cm_disconnects);
32297 cm_event.event = IW_CM_EVENT_DISCONNECT;
32298 cm_event.status = disconn_status;
32299 cm_event.local_addr = cm_id->local_addr;
32300@@ -2903,7 +2903,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32301 }
32302
32303 if (issue_close) {
32304- atomic_inc(&cm_closes);
32305+ atomic_inc_unchecked(&cm_closes);
32306 nes_disconnect(nesqp, 1);
32307
32308 cm_id->provider_data = nesqp;
32309@@ -3039,7 +3039,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32310
32311 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32312 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32313- atomic_inc(&cm_accepts);
32314+ atomic_inc_unchecked(&cm_accepts);
32315
32316 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32317 netdev_refcnt_read(nesvnic->netdev));
32318@@ -3241,7 +3241,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32319 struct nes_cm_core *cm_core;
32320 u8 *start_buff;
32321
32322- atomic_inc(&cm_rejects);
32323+ atomic_inc_unchecked(&cm_rejects);
32324 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32325 loopback = cm_node->loopbackpartner;
32326 cm_core = cm_node->cm_core;
32327@@ -3301,7 +3301,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32328 ntohl(cm_id->local_addr.sin_addr.s_addr),
32329 ntohs(cm_id->local_addr.sin_port));
32330
32331- atomic_inc(&cm_connects);
32332+ atomic_inc_unchecked(&cm_connects);
32333 nesqp->active_conn = 1;
32334
32335 /* cache the cm_id in the qp */
32336@@ -3407,7 +3407,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32337 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32338 return err;
32339 }
32340- atomic_inc(&cm_listens_created);
32341+ atomic_inc_unchecked(&cm_listens_created);
32342 }
32343
32344 cm_id->add_ref(cm_id);
32345@@ -3508,7 +3508,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32346
32347 if (nesqp->destroyed)
32348 return;
32349- atomic_inc(&cm_connecteds);
32350+ atomic_inc_unchecked(&cm_connecteds);
32351 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32352 " local port 0x%04X. jiffies = %lu.\n",
32353 nesqp->hwqp.qp_id,
32354@@ -3695,7 +3695,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32355
32356 cm_id->add_ref(cm_id);
32357 ret = cm_id->event_handler(cm_id, &cm_event);
32358- atomic_inc(&cm_closes);
32359+ atomic_inc_unchecked(&cm_closes);
32360 cm_event.event = IW_CM_EVENT_CLOSE;
32361 cm_event.status = 0;
32362 cm_event.provider_data = cm_id->provider_data;
32363@@ -3731,7 +3731,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32364 return;
32365 cm_id = cm_node->cm_id;
32366
32367- atomic_inc(&cm_connect_reqs);
32368+ atomic_inc_unchecked(&cm_connect_reqs);
32369 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32370 cm_node, cm_id, jiffies);
32371
32372@@ -3771,7 +3771,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32373 return;
32374 cm_id = cm_node->cm_id;
32375
32376- atomic_inc(&cm_connect_reqs);
32377+ atomic_inc_unchecked(&cm_connect_reqs);
32378 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32379 cm_node, cm_id, jiffies);
32380
32381diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32382index 3ba7be3..c81f6ff 100644
32383--- a/drivers/infiniband/hw/nes/nes_mgt.c
32384+++ b/drivers/infiniband/hw/nes/nes_mgt.c
32385@@ -40,8 +40,8 @@
32386 #include "nes.h"
32387 #include "nes_mgt.h"
32388
32389-atomic_t pau_qps_created;
32390-atomic_t pau_qps_destroyed;
32391+atomic_unchecked_t pau_qps_created;
32392+atomic_unchecked_t pau_qps_destroyed;
32393
32394 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32395 {
32396@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32397 {
32398 struct sk_buff *skb;
32399 unsigned long flags;
32400- atomic_inc(&pau_qps_destroyed);
32401+ atomic_inc_unchecked(&pau_qps_destroyed);
32402
32403 /* Free packets that have not yet been forwarded */
32404 /* Lock is acquired by skb_dequeue when removing the skb */
32405@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32406 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32407 skb_queue_head_init(&nesqp->pau_list);
32408 spin_lock_init(&nesqp->pau_lock);
32409- atomic_inc(&pau_qps_created);
32410+ atomic_inc_unchecked(&pau_qps_created);
32411 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32412 }
32413
32414diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32415index f3a3ecf..57d311d 100644
32416--- a/drivers/infiniband/hw/nes/nes_nic.c
32417+++ b/drivers/infiniband/hw/nes/nes_nic.c
32418@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32419 target_stat_values[++index] = mh_detected;
32420 target_stat_values[++index] = mh_pauses_sent;
32421 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32422- target_stat_values[++index] = atomic_read(&cm_connects);
32423- target_stat_values[++index] = atomic_read(&cm_accepts);
32424- target_stat_values[++index] = atomic_read(&cm_disconnects);
32425- target_stat_values[++index] = atomic_read(&cm_connecteds);
32426- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32427- target_stat_values[++index] = atomic_read(&cm_rejects);
32428- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32429- target_stat_values[++index] = atomic_read(&qps_created);
32430- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32431- target_stat_values[++index] = atomic_read(&qps_destroyed);
32432- target_stat_values[++index] = atomic_read(&cm_closes);
32433+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32434+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32435+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32436+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32437+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32438+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32439+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32440+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32441+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32442+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32443+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32444 target_stat_values[++index] = cm_packets_sent;
32445 target_stat_values[++index] = cm_packets_bounced;
32446 target_stat_values[++index] = cm_packets_created;
32447 target_stat_values[++index] = cm_packets_received;
32448 target_stat_values[++index] = cm_packets_dropped;
32449 target_stat_values[++index] = cm_packets_retrans;
32450- target_stat_values[++index] = atomic_read(&cm_listens_created);
32451- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32452+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32453+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32454 target_stat_values[++index] = cm_backlog_drops;
32455- target_stat_values[++index] = atomic_read(&cm_loopbacks);
32456- target_stat_values[++index] = atomic_read(&cm_nodes_created);
32457- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32458- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32459- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32460+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32461+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32462+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32463+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32464+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32465 target_stat_values[++index] = nesadapter->free_4kpbl;
32466 target_stat_values[++index] = nesadapter->free_256pbl;
32467 target_stat_values[++index] = int_mod_timer_init;
32468 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32469 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32470 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32471- target_stat_values[++index] = atomic_read(&pau_qps_created);
32472- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32473+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32474+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32475 }
32476
32477 /**
32478diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32479index 0927b5c..ed67986 100644
32480--- a/drivers/infiniband/hw/nes/nes_verbs.c
32481+++ b/drivers/infiniband/hw/nes/nes_verbs.c
32482@@ -46,9 +46,9 @@
32483
32484 #include <rdma/ib_umem.h>
32485
32486-atomic_t mod_qp_timouts;
32487-atomic_t qps_created;
32488-atomic_t sw_qps_destroyed;
32489+atomic_unchecked_t mod_qp_timouts;
32490+atomic_unchecked_t qps_created;
32491+atomic_unchecked_t sw_qps_destroyed;
32492
32493 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32494
32495@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32496 if (init_attr->create_flags)
32497 return ERR_PTR(-EINVAL);
32498
32499- atomic_inc(&qps_created);
32500+ atomic_inc_unchecked(&qps_created);
32501 switch (init_attr->qp_type) {
32502 case IB_QPT_RC:
32503 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32504@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32505 struct iw_cm_event cm_event;
32506 int ret = 0;
32507
32508- atomic_inc(&sw_qps_destroyed);
32509+ atomic_inc_unchecked(&sw_qps_destroyed);
32510 nesqp->destroyed = 1;
32511
32512 /* Blow away the connection if it exists. */
32513diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32514index b881bdc..c2e360c 100644
32515--- a/drivers/infiniband/hw/qib/qib.h
32516+++ b/drivers/infiniband/hw/qib/qib.h
32517@@ -51,6 +51,7 @@
32518 #include <linux/completion.h>
32519 #include <linux/kref.h>
32520 #include <linux/sched.h>
32521+#include <linux/slab.h>
32522
32523 #include "qib_common.h"
32524 #include "qib_verbs.h"
32525diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
32526index 05e0f17..0275789 100644
32527--- a/drivers/infiniband/hw/qib/qib_fs.c
32528+++ b/drivers/infiniband/hw/qib/qib_fs.c
32529@@ -267,6 +267,8 @@ static const struct file_operations qsfp_ops[] = {
32530 };
32531
32532 static ssize_t flash_read(struct file *file, char __user *buf,
32533+ size_t count, loff_t *ppos) __size_overflow(3);
32534+static ssize_t flash_read(struct file *file, char __user *buf,
32535 size_t count, loff_t *ppos)
32536 {
32537 struct qib_devdata *dd;
32538@@ -318,6 +320,8 @@ bail:
32539 }
32540
32541 static ssize_t flash_write(struct file *file, const char __user *buf,
32542+ size_t count, loff_t *ppos) __size_overflow(3);
32543+static ssize_t flash_write(struct file *file, const char __user *buf,
32544 size_t count, loff_t *ppos)
32545 {
32546 struct qib_devdata *dd;
32547diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32548index c351aa4..e6967c2 100644
32549--- a/drivers/input/gameport/gameport.c
32550+++ b/drivers/input/gameport/gameport.c
32551@@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32552 */
32553 static void gameport_init_port(struct gameport *gameport)
32554 {
32555- static atomic_t gameport_no = ATOMIC_INIT(0);
32556+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32557
32558 __module_get(THIS_MODULE);
32559
32560 mutex_init(&gameport->drv_mutex);
32561 device_initialize(&gameport->dev);
32562 dev_set_name(&gameport->dev, "gameport%lu",
32563- (unsigned long)atomic_inc_return(&gameport_no) - 1);
32564+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32565 gameport->dev.bus = &gameport_bus;
32566 gameport->dev.release = gameport_release_port;
32567 if (gameport->parent)
32568diff --git a/drivers/input/input.c b/drivers/input/input.c
32569index 1f78c95..3cddc6c 100644
32570--- a/drivers/input/input.c
32571+++ b/drivers/input/input.c
32572@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32573 */
32574 int input_register_device(struct input_dev *dev)
32575 {
32576- static atomic_t input_no = ATOMIC_INIT(0);
32577+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32578 struct input_handler *handler;
32579 const char *path;
32580 int error;
32581@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32582 dev->setkeycode = input_default_setkeycode;
32583
32584 dev_set_name(&dev->dev, "input%ld",
32585- (unsigned long) atomic_inc_return(&input_no) - 1);
32586+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32587
32588 error = device_add(&dev->dev);
32589 if (error)
32590diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32591index b8d8611..7a4a04b 100644
32592--- a/drivers/input/joystick/sidewinder.c
32593+++ b/drivers/input/joystick/sidewinder.c
32594@@ -30,6 +30,7 @@
32595 #include <linux/kernel.h>
32596 #include <linux/module.h>
32597 #include <linux/slab.h>
32598+#include <linux/sched.h>
32599 #include <linux/init.h>
32600 #include <linux/input.h>
32601 #include <linux/gameport.h>
32602diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32603index fd7a0d5..a4af10c 100644
32604--- a/drivers/input/joystick/xpad.c
32605+++ b/drivers/input/joystick/xpad.c
32606@@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32607
32608 static int xpad_led_probe(struct usb_xpad *xpad)
32609 {
32610- static atomic_t led_seq = ATOMIC_INIT(0);
32611+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32612 long led_no;
32613 struct xpad_led *led;
32614 struct led_classdev *led_cdev;
32615@@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32616 if (!led)
32617 return -ENOMEM;
32618
32619- led_no = (long)atomic_inc_return(&led_seq) - 1;
32620+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32621
32622 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32623 led->xpad = xpad;
32624diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32625index 0110b5a..d3ad144 100644
32626--- a/drivers/input/mousedev.c
32627+++ b/drivers/input/mousedev.c
32628@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32629
32630 spin_unlock_irq(&client->packet_lock);
32631
32632- if (copy_to_user(buffer, data, count))
32633+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
32634 return -EFAULT;
32635
32636 return count;
32637diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32638index ba70058..571d25d 100644
32639--- a/drivers/input/serio/serio.c
32640+++ b/drivers/input/serio/serio.c
32641@@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
32642 */
32643 static void serio_init_port(struct serio *serio)
32644 {
32645- static atomic_t serio_no = ATOMIC_INIT(0);
32646+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32647
32648 __module_get(THIS_MODULE);
32649
32650@@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
32651 mutex_init(&serio->drv_mutex);
32652 device_initialize(&serio->dev);
32653 dev_set_name(&serio->dev, "serio%ld",
32654- (long)atomic_inc_return(&serio_no) - 1);
32655+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
32656 serio->dev.bus = &serio_bus;
32657 serio->dev.release = serio_release_port;
32658 serio->dev.groups = serio_device_attr_groups;
32659diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32660index e44933d..9ba484a 100644
32661--- a/drivers/isdn/capi/capi.c
32662+++ b/drivers/isdn/capi/capi.c
32663@@ -83,8 +83,8 @@ struct capiminor {
32664
32665 struct capi20_appl *ap;
32666 u32 ncci;
32667- atomic_t datahandle;
32668- atomic_t msgid;
32669+ atomic_unchecked_t datahandle;
32670+ atomic_unchecked_t msgid;
32671
32672 struct tty_port port;
32673 int ttyinstop;
32674@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32675 capimsg_setu16(s, 2, mp->ap->applid);
32676 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32677 capimsg_setu8 (s, 5, CAPI_RESP);
32678- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32679+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32680 capimsg_setu32(s, 8, mp->ncci);
32681 capimsg_setu16(s, 12, datahandle);
32682 }
32683@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32684 mp->outbytes -= len;
32685 spin_unlock_bh(&mp->outlock);
32686
32687- datahandle = atomic_inc_return(&mp->datahandle);
32688+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32689 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32690 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32691 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32692 capimsg_setu16(skb->data, 2, mp->ap->applid);
32693 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32694 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32695- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32696+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32697 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32698 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32699 capimsg_setu16(skb->data, 16, len); /* Data length */
32700diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
32701index db621db..825ea1a 100644
32702--- a/drivers/isdn/gigaset/common.c
32703+++ b/drivers/isdn/gigaset/common.c
32704@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
32705 cs->commands_pending = 0;
32706 cs->cur_at_seq = 0;
32707 cs->gotfwver = -1;
32708- cs->open_count = 0;
32709+ local_set(&cs->open_count, 0);
32710 cs->dev = NULL;
32711 cs->tty = NULL;
32712 cs->tty_dev = NULL;
32713diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
32714index 212efaf..f187c6b 100644
32715--- a/drivers/isdn/gigaset/gigaset.h
32716+++ b/drivers/isdn/gigaset/gigaset.h
32717@@ -35,6 +35,7 @@
32718 #include <linux/tty_driver.h>
32719 #include <linux/list.h>
32720 #include <linux/atomic.h>
32721+#include <asm/local.h>
32722
32723 #define GIG_VERSION {0, 5, 0, 0}
32724 #define GIG_COMPAT {0, 4, 0, 0}
32725@@ -433,7 +434,7 @@ struct cardstate {
32726 spinlock_t cmdlock;
32727 unsigned curlen, cmdbytes;
32728
32729- unsigned open_count;
32730+ local_t open_count;
32731 struct tty_struct *tty;
32732 struct tasklet_struct if_wake_tasklet;
32733 unsigned control_state;
32734diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
32735index ee0a549..a7c9798 100644
32736--- a/drivers/isdn/gigaset/interface.c
32737+++ b/drivers/isdn/gigaset/interface.c
32738@@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
32739 }
32740 tty->driver_data = cs;
32741
32742- ++cs->open_count;
32743-
32744- if (cs->open_count == 1) {
32745+ if (local_inc_return(&cs->open_count) == 1) {
32746 spin_lock_irqsave(&cs->lock, flags);
32747 cs->tty = tty;
32748 spin_unlock_irqrestore(&cs->lock, flags);
32749@@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
32750
32751 if (!cs->connected)
32752 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32753- else if (!cs->open_count)
32754+ else if (!local_read(&cs->open_count))
32755 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32756 else {
32757- if (!--cs->open_count) {
32758+ if (!local_dec_return(&cs->open_count)) {
32759 spin_lock_irqsave(&cs->lock, flags);
32760 cs->tty = NULL;
32761 spin_unlock_irqrestore(&cs->lock, flags);
32762@@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
32763 if (!cs->connected) {
32764 gig_dbg(DEBUG_IF, "not connected");
32765 retval = -ENODEV;
32766- } else if (!cs->open_count)
32767+ } else if (!local_read(&cs->open_count))
32768 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32769 else {
32770 retval = 0;
32771@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
32772 retval = -ENODEV;
32773 goto done;
32774 }
32775- if (!cs->open_count) {
32776+ if (!local_read(&cs->open_count)) {
32777 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32778 retval = -ENODEV;
32779 goto done;
32780@@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
32781 if (!cs->connected) {
32782 gig_dbg(DEBUG_IF, "not connected");
32783 retval = -ENODEV;
32784- } else if (!cs->open_count)
32785+ } else if (!local_read(&cs->open_count))
32786 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32787 else if (cs->mstate != MS_LOCKED) {
32788 dev_warn(cs->dev, "can't write to unlocked device\n");
32789@@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
32790
32791 if (!cs->connected)
32792 gig_dbg(DEBUG_IF, "not connected");
32793- else if (!cs->open_count)
32794+ else if (!local_read(&cs->open_count))
32795 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32796 else if (cs->mstate != MS_LOCKED)
32797 dev_warn(cs->dev, "can't write to unlocked device\n");
32798@@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
32799
32800 if (!cs->connected)
32801 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32802- else if (!cs->open_count)
32803+ else if (!local_read(&cs->open_count))
32804 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32805 else
32806 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
32807@@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
32808
32809 if (!cs->connected)
32810 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
32811- else if (!cs->open_count)
32812+ else if (!local_read(&cs->open_count))
32813 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32814 else
32815 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
32816@@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
32817 goto out;
32818 }
32819
32820- if (!cs->open_count) {
32821+ if (!local_read(&cs->open_count)) {
32822 dev_warn(cs->dev, "%s: device not opened\n", __func__);
32823 goto out;
32824 }
32825diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32826index 2a57da59..e7a12ed 100644
32827--- a/drivers/isdn/hardware/avm/b1.c
32828+++ b/drivers/isdn/hardware/avm/b1.c
32829@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
32830 }
32831 if (left) {
32832 if (t4file->user) {
32833- if (copy_from_user(buf, dp, left))
32834+ if (left > sizeof buf || copy_from_user(buf, dp, left))
32835 return -EFAULT;
32836 } else {
32837 memcpy(buf, dp, left);
32838@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
32839 }
32840 if (left) {
32841 if (config->user) {
32842- if (copy_from_user(buf, dp, left))
32843+ if (left > sizeof buf || copy_from_user(buf, dp, left))
32844 return -EFAULT;
32845 } else {
32846 memcpy(buf, dp, left);
32847diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32848index 85784a7..a19ca98 100644
32849--- a/drivers/isdn/hardware/eicon/divasync.h
32850+++ b/drivers/isdn/hardware/eicon/divasync.h
32851@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32852 } diva_didd_add_adapter_t;
32853 typedef struct _diva_didd_remove_adapter {
32854 IDI_CALL p_request;
32855-} diva_didd_remove_adapter_t;
32856+} __no_const diva_didd_remove_adapter_t;
32857 typedef struct _diva_didd_read_adapter_array {
32858 void * buffer;
32859 dword length;
32860diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32861index a3bd163..8956575 100644
32862--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32863+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32864@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32865 typedef struct _diva_os_idi_adapter_interface {
32866 diva_init_card_proc_t cleanup_adapter_proc;
32867 diva_cmd_card_proc_t cmd_proc;
32868-} diva_os_idi_adapter_interface_t;
32869+} __no_const diva_os_idi_adapter_interface_t;
32870
32871 typedef struct _diva_os_xdi_adapter {
32872 struct list_head link;
32873diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32874index 1f355bb..43f1fea 100644
32875--- a/drivers/isdn/icn/icn.c
32876+++ b/drivers/isdn/icn/icn.c
32877@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
32878 if (count > len)
32879 count = len;
32880 if (user) {
32881- if (copy_from_user(msg, buf, count))
32882+ if (count > sizeof msg || copy_from_user(msg, buf, count))
32883 return -EFAULT;
32884 } else
32885 memcpy(msg, buf, count);
32886diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32887index b5fdcb7..5b6c59f 100644
32888--- a/drivers/lguest/core.c
32889+++ b/drivers/lguest/core.c
32890@@ -92,9 +92,17 @@ static __init int map_switcher(void)
32891 * it's worked so far. The end address needs +1 because __get_vm_area
32892 * allocates an extra guard page, so we need space for that.
32893 */
32894+
32895+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32896+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32897+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32898+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32899+#else
32900 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32901 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32902 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32903+#endif
32904+
32905 if (!switcher_vma) {
32906 err = -ENOMEM;
32907 printk("lguest: could not map switcher pages high\n");
32908@@ -119,7 +127,7 @@ static __init int map_switcher(void)
32909 * Now the Switcher is mapped at the right address, we can't fail!
32910 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32911 */
32912- memcpy(switcher_vma->addr, start_switcher_text,
32913+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32914 end_switcher_text - start_switcher_text);
32915
32916 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32917diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
32918index ff4a0bc..f5fdd9c 100644
32919--- a/drivers/lguest/lguest_user.c
32920+++ b/drivers/lguest/lguest_user.c
32921@@ -198,6 +198,7 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
32922 * Once our Guest is initialized, the Launcher makes it run by reading
32923 * from /dev/lguest.
32924 */
32925+static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) __size_overflow(3);
32926 static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
32927 {
32928 struct lguest *lg = file->private_data;
32929diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32930index 3980903..ce25c5e 100644
32931--- a/drivers/lguest/x86/core.c
32932+++ b/drivers/lguest/x86/core.c
32933@@ -59,7 +59,7 @@ static struct {
32934 /* Offset from where switcher.S was compiled to where we've copied it */
32935 static unsigned long switcher_offset(void)
32936 {
32937- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32938+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32939 }
32940
32941 /* This cpu's struct lguest_pages. */
32942@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32943 * These copies are pretty cheap, so we do them unconditionally: */
32944 /* Save the current Host top-level page directory.
32945 */
32946+
32947+#ifdef CONFIG_PAX_PER_CPU_PGD
32948+ pages->state.host_cr3 = read_cr3();
32949+#else
32950 pages->state.host_cr3 = __pa(current->mm->pgd);
32951+#endif
32952+
32953 /*
32954 * Set up the Guest's page tables to see this CPU's pages (and no
32955 * other CPU's pages).
32956@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
32957 * compiled-in switcher code and the high-mapped copy we just made.
32958 */
32959 for (i = 0; i < IDT_ENTRIES; i++)
32960- default_idt_entries[i] += switcher_offset();
32961+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
32962
32963 /*
32964 * Set up the Switcher's per-cpu areas.
32965@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
32966 * it will be undisturbed when we switch. To change %cs and jump we
32967 * need this structure to feed to Intel's "lcall" instruction.
32968 */
32969- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
32970+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
32971 lguest_entry.segment = LGUEST_CS;
32972
32973 /*
32974diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
32975index 40634b0..4f5855e 100644
32976--- a/drivers/lguest/x86/switcher_32.S
32977+++ b/drivers/lguest/x86/switcher_32.S
32978@@ -87,6 +87,7 @@
32979 #include <asm/page.h>
32980 #include <asm/segment.h>
32981 #include <asm/lguest.h>
32982+#include <asm/processor-flags.h>
32983
32984 // We mark the start of the code to copy
32985 // It's placed in .text tho it's never run here
32986@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
32987 // Changes type when we load it: damn Intel!
32988 // For after we switch over our page tables
32989 // That entry will be read-only: we'd crash.
32990+
32991+#ifdef CONFIG_PAX_KERNEXEC
32992+ mov %cr0, %edx
32993+ xor $X86_CR0_WP, %edx
32994+ mov %edx, %cr0
32995+#endif
32996+
32997 movl $(GDT_ENTRY_TSS*8), %edx
32998 ltr %dx
32999
33000@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
33001 // Let's clear it again for our return.
33002 // The GDT descriptor of the Host
33003 // Points to the table after two "size" bytes
33004- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
33005+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
33006 // Clear "used" from type field (byte 5, bit 2)
33007- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
33008+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
33009+
33010+#ifdef CONFIG_PAX_KERNEXEC
33011+ mov %cr0, %eax
33012+ xor $X86_CR0_WP, %eax
33013+ mov %eax, %cr0
33014+#endif
33015
33016 // Once our page table's switched, the Guest is live!
33017 // The Host fades as we run this final step.
33018@@ -295,13 +309,12 @@ deliver_to_host:
33019 // I consulted gcc, and it gave
33020 // These instructions, which I gladly credit:
33021 leal (%edx,%ebx,8), %eax
33022- movzwl (%eax),%edx
33023- movl 4(%eax), %eax
33024- xorw %ax, %ax
33025- orl %eax, %edx
33026+ movl 4(%eax), %edx
33027+ movw (%eax), %dx
33028 // Now the address of the handler's in %edx
33029 // We call it now: its "iret" drops us home.
33030- jmp *%edx
33031+ ljmp $__KERNEL_CS, $1f
33032+1: jmp *%edx
33033
33034 // Every interrupt can come to us here
33035 // But we must truly tell each apart.
33036diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
33037index 4daf9e5..b8d1d0f 100644
33038--- a/drivers/macintosh/macio_asic.c
33039+++ b/drivers/macintosh/macio_asic.c
33040@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
33041 * MacIO is matched against any Apple ID, it's probe() function
33042 * will then decide wether it applies or not
33043 */
33044-static const struct pci_device_id __devinitdata pci_ids [] = { {
33045+static const struct pci_device_id __devinitconst pci_ids [] = { {
33046 .vendor = PCI_VENDOR_ID_APPLE,
33047 .device = PCI_ANY_ID,
33048 .subvendor = PCI_ANY_ID,
33049diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
33050index 1ce84ed..0fdd40a 100644
33051--- a/drivers/md/dm-ioctl.c
33052+++ b/drivers/md/dm-ioctl.c
33053@@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
33054 cmd == DM_LIST_VERSIONS_CMD)
33055 return 0;
33056
33057- if ((cmd == DM_DEV_CREATE_CMD)) {
33058+ if (cmd == DM_DEV_CREATE_CMD) {
33059 if (!*param->name) {
33060 DMWARN("name not supplied when creating device");
33061 return -EINVAL;
33062diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
33063index 9bfd057..5373ff3 100644
33064--- a/drivers/md/dm-raid1.c
33065+++ b/drivers/md/dm-raid1.c
33066@@ -40,7 +40,7 @@ enum dm_raid1_error {
33067
33068 struct mirror {
33069 struct mirror_set *ms;
33070- atomic_t error_count;
33071+ atomic_unchecked_t error_count;
33072 unsigned long error_type;
33073 struct dm_dev *dev;
33074 sector_t offset;
33075@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
33076 struct mirror *m;
33077
33078 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33079- if (!atomic_read(&m->error_count))
33080+ if (!atomic_read_unchecked(&m->error_count))
33081 return m;
33082
33083 return NULL;
33084@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33085 * simple way to tell if a device has encountered
33086 * errors.
33087 */
33088- atomic_inc(&m->error_count);
33089+ atomic_inc_unchecked(&m->error_count);
33090
33091 if (test_and_set_bit(error_type, &m->error_type))
33092 return;
33093@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33094 struct mirror *m = get_default_mirror(ms);
33095
33096 do {
33097- if (likely(!atomic_read(&m->error_count)))
33098+ if (likely(!atomic_read_unchecked(&m->error_count)))
33099 return m;
33100
33101 if (m-- == ms->mirror)
33102@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33103 {
33104 struct mirror *default_mirror = get_default_mirror(m->ms);
33105
33106- return !atomic_read(&default_mirror->error_count);
33107+ return !atomic_read_unchecked(&default_mirror->error_count);
33108 }
33109
33110 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33111@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33112 */
33113 if (likely(region_in_sync(ms, region, 1)))
33114 m = choose_mirror(ms, bio->bi_sector);
33115- else if (m && atomic_read(&m->error_count))
33116+ else if (m && atomic_read_unchecked(&m->error_count))
33117 m = NULL;
33118
33119 if (likely(m))
33120@@ -848,6 +848,10 @@ static void do_mirror(struct work_struct *work)
33121 static struct mirror_set *alloc_context(unsigned int nr_mirrors,
33122 uint32_t region_size,
33123 struct dm_target *ti,
33124+ struct dm_dirty_log *dl) __size_overflow(1);
33125+static struct mirror_set *alloc_context(unsigned int nr_mirrors,
33126+ uint32_t region_size,
33127+ struct dm_target *ti,
33128 struct dm_dirty_log *dl)
33129 {
33130 size_t len;
33131@@ -937,7 +941,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33132 }
33133
33134 ms->mirror[mirror].ms = ms;
33135- atomic_set(&(ms->mirror[mirror].error_count), 0);
33136+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33137 ms->mirror[mirror].error_type = 0;
33138 ms->mirror[mirror].offset = offset;
33139
33140@@ -1347,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
33141 */
33142 static char device_status_char(struct mirror *m)
33143 {
33144- if (!atomic_read(&(m->error_count)))
33145+ if (!atomic_read_unchecked(&(m->error_count)))
33146 return 'A';
33147
33148 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33149diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33150index 3d80cf0..7d98e1a 100644
33151--- a/drivers/md/dm-stripe.c
33152+++ b/drivers/md/dm-stripe.c
33153@@ -20,7 +20,7 @@ struct stripe {
33154 struct dm_dev *dev;
33155 sector_t physical_start;
33156
33157- atomic_t error_count;
33158+ atomic_unchecked_t error_count;
33159 };
33160
33161 struct stripe_c {
33162@@ -55,6 +55,7 @@ static void trigger_event(struct work_struct *work)
33163 dm_table_event(sc->ti->table);
33164 }
33165
33166+static inline struct stripe_c *alloc_context(unsigned int stripes) __size_overflow(1);
33167 static inline struct stripe_c *alloc_context(unsigned int stripes)
33168 {
33169 size_t len;
33170@@ -192,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33171 kfree(sc);
33172 return r;
33173 }
33174- atomic_set(&(sc->stripe[i].error_count), 0);
33175+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33176 }
33177
33178 ti->private = sc;
33179@@ -314,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33180 DMEMIT("%d ", sc->stripes);
33181 for (i = 0; i < sc->stripes; i++) {
33182 DMEMIT("%s ", sc->stripe[i].dev->name);
33183- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33184+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33185 'D' : 'A';
33186 }
33187 buffer[i] = '\0';
33188@@ -361,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33189 */
33190 for (i = 0; i < sc->stripes; i++)
33191 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33192- atomic_inc(&(sc->stripe[i].error_count));
33193- if (atomic_read(&(sc->stripe[i].error_count)) <
33194+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
33195+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33196 DM_IO_ERROR_THRESHOLD)
33197 schedule_work(&sc->trigger_event);
33198 }
33199diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33200index 63cc542..8d45caf3 100644
33201--- a/drivers/md/dm-table.c
33202+++ b/drivers/md/dm-table.c
33203@@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33204 if (!dev_size)
33205 return 0;
33206
33207- if ((start >= dev_size) || (start + len > dev_size)) {
33208+ if ((start >= dev_size) || (len > dev_size - start)) {
33209 DMWARN("%s: %s too small for target: "
33210 "start=%llu, len=%llu, dev_size=%llu",
33211 dm_device_name(ti->table->md), bdevname(bdev, b),
33212diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33213index 237571a..fb6d19b 100644
33214--- a/drivers/md/dm-thin-metadata.c
33215+++ b/drivers/md/dm-thin-metadata.c
33216@@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33217
33218 pmd->info.tm = tm;
33219 pmd->info.levels = 2;
33220- pmd->info.value_type.context = pmd->data_sm;
33221+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33222 pmd->info.value_type.size = sizeof(__le64);
33223 pmd->info.value_type.inc = data_block_inc;
33224 pmd->info.value_type.dec = data_block_dec;
33225@@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33226
33227 pmd->bl_info.tm = tm;
33228 pmd->bl_info.levels = 1;
33229- pmd->bl_info.value_type.context = pmd->data_sm;
33230+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33231 pmd->bl_info.value_type.size = sizeof(__le64);
33232 pmd->bl_info.value_type.inc = data_block_inc;
33233 pmd->bl_info.value_type.dec = data_block_dec;
33234diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33235index b89c548..2af3ce4 100644
33236--- a/drivers/md/dm.c
33237+++ b/drivers/md/dm.c
33238@@ -176,9 +176,9 @@ struct mapped_device {
33239 /*
33240 * Event handling.
33241 */
33242- atomic_t event_nr;
33243+ atomic_unchecked_t event_nr;
33244 wait_queue_head_t eventq;
33245- atomic_t uevent_seq;
33246+ atomic_unchecked_t uevent_seq;
33247 struct list_head uevent_list;
33248 spinlock_t uevent_lock; /* Protect access to uevent_list */
33249
33250@@ -1844,8 +1844,8 @@ static struct mapped_device *alloc_dev(int minor)
33251 rwlock_init(&md->map_lock);
33252 atomic_set(&md->holders, 1);
33253 atomic_set(&md->open_count, 0);
33254- atomic_set(&md->event_nr, 0);
33255- atomic_set(&md->uevent_seq, 0);
33256+ atomic_set_unchecked(&md->event_nr, 0);
33257+ atomic_set_unchecked(&md->uevent_seq, 0);
33258 INIT_LIST_HEAD(&md->uevent_list);
33259 spin_lock_init(&md->uevent_lock);
33260
33261@@ -1979,7 +1979,7 @@ static void event_callback(void *context)
33262
33263 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33264
33265- atomic_inc(&md->event_nr);
33266+ atomic_inc_unchecked(&md->event_nr);
33267 wake_up(&md->eventq);
33268 }
33269
33270@@ -2621,18 +2621,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33271
33272 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33273 {
33274- return atomic_add_return(1, &md->uevent_seq);
33275+ return atomic_add_return_unchecked(1, &md->uevent_seq);
33276 }
33277
33278 uint32_t dm_get_event_nr(struct mapped_device *md)
33279 {
33280- return atomic_read(&md->event_nr);
33281+ return atomic_read_unchecked(&md->event_nr);
33282 }
33283
33284 int dm_wait_event(struct mapped_device *md, int event_nr)
33285 {
33286 return wait_event_interruptible(md->eventq,
33287- (event_nr != atomic_read(&md->event_nr)));
33288+ (event_nr != atomic_read_unchecked(&md->event_nr)));
33289 }
33290
33291 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33292diff --git a/drivers/md/md.c b/drivers/md/md.c
33293index 6acc846..80a6b96 100644
33294--- a/drivers/md/md.c
33295+++ b/drivers/md/md.c
33296@@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33297 * start build, activate spare
33298 */
33299 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33300-static atomic_t md_event_count;
33301+static atomic_unchecked_t md_event_count;
33302 void md_new_event(struct mddev *mddev)
33303 {
33304- atomic_inc(&md_event_count);
33305+ atomic_inc_unchecked(&md_event_count);
33306 wake_up(&md_event_waiters);
33307 }
33308 EXPORT_SYMBOL_GPL(md_new_event);
33309@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33310 */
33311 static void md_new_event_inintr(struct mddev *mddev)
33312 {
33313- atomic_inc(&md_event_count);
33314+ atomic_inc_unchecked(&md_event_count);
33315 wake_up(&md_event_waiters);
33316 }
33317
33318@@ -1524,7 +1524,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33319
33320 rdev->preferred_minor = 0xffff;
33321 rdev->data_offset = le64_to_cpu(sb->data_offset);
33322- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33323+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33324
33325 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33326 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33327@@ -1743,7 +1743,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33328 else
33329 sb->resync_offset = cpu_to_le64(0);
33330
33331- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33332+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33333
33334 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33335 sb->size = cpu_to_le64(mddev->dev_sectors);
33336@@ -2689,7 +2689,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33337 static ssize_t
33338 errors_show(struct md_rdev *rdev, char *page)
33339 {
33340- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33341+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33342 }
33343
33344 static ssize_t
33345@@ -2698,7 +2698,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33346 char *e;
33347 unsigned long n = simple_strtoul(buf, &e, 10);
33348 if (*buf && (*e == 0 || *e == '\n')) {
33349- atomic_set(&rdev->corrected_errors, n);
33350+ atomic_set_unchecked(&rdev->corrected_errors, n);
33351 return len;
33352 }
33353 return -EINVAL;
33354@@ -3084,8 +3084,8 @@ int md_rdev_init(struct md_rdev *rdev)
33355 rdev->sb_loaded = 0;
33356 rdev->bb_page = NULL;
33357 atomic_set(&rdev->nr_pending, 0);
33358- atomic_set(&rdev->read_errors, 0);
33359- atomic_set(&rdev->corrected_errors, 0);
33360+ atomic_set_unchecked(&rdev->read_errors, 0);
33361+ atomic_set_unchecked(&rdev->corrected_errors, 0);
33362
33363 INIT_LIST_HEAD(&rdev->same_set);
33364 init_waitqueue_head(&rdev->blocked_wait);
33365@@ -6736,7 +6736,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33366
33367 spin_unlock(&pers_lock);
33368 seq_printf(seq, "\n");
33369- seq->poll_event = atomic_read(&md_event_count);
33370+ seq->poll_event = atomic_read_unchecked(&md_event_count);
33371 return 0;
33372 }
33373 if (v == (void*)2) {
33374@@ -6828,7 +6828,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33375 chunk_kb ? "KB" : "B");
33376 if (bitmap->file) {
33377 seq_printf(seq, ", file: ");
33378- seq_path(seq, &bitmap->file->f_path, " \t\n");
33379+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
33380 }
33381
33382 seq_printf(seq, "\n");
33383@@ -6859,7 +6859,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33384 return error;
33385
33386 seq = file->private_data;
33387- seq->poll_event = atomic_read(&md_event_count);
33388+ seq->poll_event = atomic_read_unchecked(&md_event_count);
33389 return error;
33390 }
33391
33392@@ -6873,7 +6873,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33393 /* always allow read */
33394 mask = POLLIN | POLLRDNORM;
33395
33396- if (seq->poll_event != atomic_read(&md_event_count))
33397+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33398 mask |= POLLERR | POLLPRI;
33399 return mask;
33400 }
33401@@ -6917,7 +6917,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33402 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33403 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33404 (int)part_stat_read(&disk->part0, sectors[1]) -
33405- atomic_read(&disk->sync_io);
33406+ atomic_read_unchecked(&disk->sync_io);
33407 /* sync IO will cause sync_io to increase before the disk_stats
33408 * as sync_io is counted when a request starts, and
33409 * disk_stats is counted when it completes.
33410diff --git a/drivers/md/md.h b/drivers/md/md.h
33411index 44c63df..b795d1a 100644
33412--- a/drivers/md/md.h
33413+++ b/drivers/md/md.h
33414@@ -93,13 +93,13 @@ struct md_rdev {
33415 * only maintained for arrays that
33416 * support hot removal
33417 */
33418- atomic_t read_errors; /* number of consecutive read errors that
33419+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
33420 * we have tried to ignore.
33421 */
33422 struct timespec last_read_error; /* monotonic time since our
33423 * last read error
33424 */
33425- atomic_t corrected_errors; /* number of corrected read errors,
33426+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33427 * for reporting to userspace and storing
33428 * in superblock.
33429 */
33430@@ -421,7 +421,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33431
33432 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33433 {
33434- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33435+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33436 }
33437
33438 struct md_personality
33439diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33440index 50ed53b..4f29d7d 100644
33441--- a/drivers/md/persistent-data/dm-space-map-checker.c
33442+++ b/drivers/md/persistent-data/dm-space-map-checker.c
33443@@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
33444 /*----------------------------------------------------------------*/
33445
33446 struct sm_checker {
33447- struct dm_space_map sm;
33448+ dm_space_map_no_const sm;
33449
33450 struct count_array old_counts;
33451 struct count_array counts;
33452diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33453index fc469ba..2d91555 100644
33454--- a/drivers/md/persistent-data/dm-space-map-disk.c
33455+++ b/drivers/md/persistent-data/dm-space-map-disk.c
33456@@ -23,7 +23,7 @@
33457 * Space map interface.
33458 */
33459 struct sm_disk {
33460- struct dm_space_map sm;
33461+ dm_space_map_no_const sm;
33462
33463 struct ll_disk ll;
33464 struct ll_disk old_ll;
33465diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33466index e89ae5e..062e4c2 100644
33467--- a/drivers/md/persistent-data/dm-space-map-metadata.c
33468+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33469@@ -43,7 +43,7 @@ struct block_op {
33470 };
33471
33472 struct sm_metadata {
33473- struct dm_space_map sm;
33474+ dm_space_map_no_const sm;
33475
33476 struct ll_disk ll;
33477 struct ll_disk old_ll;
33478diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33479index 1cbfc6b..56e1dbb 100644
33480--- a/drivers/md/persistent-data/dm-space-map.h
33481+++ b/drivers/md/persistent-data/dm-space-map.h
33482@@ -60,6 +60,7 @@ struct dm_space_map {
33483 int (*root_size)(struct dm_space_map *sm, size_t *result);
33484 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33485 };
33486+typedef struct dm_space_map __no_const dm_space_map_no_const;
33487
33488 /*----------------------------------------------------------------*/
33489
33490diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33491index 118e0f6..210c4d7 100644
33492--- a/drivers/md/raid1.c
33493+++ b/drivers/md/raid1.c
33494@@ -1645,7 +1645,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33495 if (r1_sync_page_io(rdev, sect, s,
33496 bio->bi_io_vec[idx].bv_page,
33497 READ) != 0)
33498- atomic_add(s, &rdev->corrected_errors);
33499+ atomic_add_unchecked(s, &rdev->corrected_errors);
33500 }
33501 sectors -= s;
33502 sect += s;
33503@@ -1858,7 +1858,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33504 test_bit(In_sync, &rdev->flags)) {
33505 if (r1_sync_page_io(rdev, sect, s,
33506 conf->tmppage, READ)) {
33507- atomic_add(s, &rdev->corrected_errors);
33508+ atomic_add_unchecked(s, &rdev->corrected_errors);
33509 printk(KERN_INFO
33510 "md/raid1:%s: read error corrected "
33511 "(%d sectors at %llu on %s)\n",
33512diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33513index be7101d..f23ba30 100644
33514--- a/drivers/md/raid10.c
33515+++ b/drivers/md/raid10.c
33516@@ -1636,7 +1636,7 @@ static void end_sync_read(struct bio *bio, int error)
33517 /* The write handler will notice the lack of
33518 * R10BIO_Uptodate and record any errors etc
33519 */
33520- atomic_add(r10_bio->sectors,
33521+ atomic_add_unchecked(r10_bio->sectors,
33522 &conf->mirrors[d].rdev->corrected_errors);
33523
33524 /* for reconstruct, we always reschedule after a read.
33525@@ -1987,7 +1987,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33526 {
33527 struct timespec cur_time_mon;
33528 unsigned long hours_since_last;
33529- unsigned int read_errors = atomic_read(&rdev->read_errors);
33530+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33531
33532 ktime_get_ts(&cur_time_mon);
33533
33534@@ -2009,9 +2009,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33535 * overflowing the shift of read_errors by hours_since_last.
33536 */
33537 if (hours_since_last >= 8 * sizeof(read_errors))
33538- atomic_set(&rdev->read_errors, 0);
33539+ atomic_set_unchecked(&rdev->read_errors, 0);
33540 else
33541- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33542+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33543 }
33544
33545 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33546@@ -2065,8 +2065,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33547 return;
33548
33549 check_decay_read_errors(mddev, rdev);
33550- atomic_inc(&rdev->read_errors);
33551- if (atomic_read(&rdev->read_errors) > max_read_errors) {
33552+ atomic_inc_unchecked(&rdev->read_errors);
33553+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33554 char b[BDEVNAME_SIZE];
33555 bdevname(rdev->bdev, b);
33556
33557@@ -2074,7 +2074,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33558 "md/raid10:%s: %s: Raid device exceeded "
33559 "read_error threshold [cur %d:max %d]\n",
33560 mdname(mddev), b,
33561- atomic_read(&rdev->read_errors), max_read_errors);
33562+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33563 printk(KERN_NOTICE
33564 "md/raid10:%s: %s: Failing raid device\n",
33565 mdname(mddev), b);
33566@@ -2223,7 +2223,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33567 (unsigned long long)(
33568 sect + rdev->data_offset),
33569 bdevname(rdev->bdev, b));
33570- atomic_add(s, &rdev->corrected_errors);
33571+ atomic_add_unchecked(s, &rdev->corrected_errors);
33572 }
33573
33574 rdev_dec_pending(rdev, mddev);
33575diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33576index 360f2b9..08b5382 100644
33577--- a/drivers/md/raid5.c
33578+++ b/drivers/md/raid5.c
33579@@ -1687,18 +1687,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
33580 (unsigned long long)(sh->sector
33581 + rdev->data_offset),
33582 bdevname(rdev->bdev, b));
33583- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33584+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33585 clear_bit(R5_ReadError, &sh->dev[i].flags);
33586 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33587 }
33588- if (atomic_read(&rdev->read_errors))
33589- atomic_set(&rdev->read_errors, 0);
33590+ if (atomic_read_unchecked(&rdev->read_errors))
33591+ atomic_set_unchecked(&rdev->read_errors, 0);
33592 } else {
33593 const char *bdn = bdevname(rdev->bdev, b);
33594 int retry = 0;
33595
33596 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33597- atomic_inc(&rdev->read_errors);
33598+ atomic_inc_unchecked(&rdev->read_errors);
33599 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33600 printk_ratelimited(
33601 KERN_WARNING
33602@@ -1727,7 +1727,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33603 (unsigned long long)(sh->sector
33604 + rdev->data_offset),
33605 bdn);
33606- else if (atomic_read(&rdev->read_errors)
33607+ else if (atomic_read_unchecked(&rdev->read_errors)
33608 > conf->max_nr_stripes)
33609 printk(KERN_WARNING
33610 "md/raid:%s: Too many read errors, failing device %s.\n",
33611diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33612index ce4f858..7bcfb46 100644
33613--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33614+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33615@@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
33616 .subvendor = _subvend, .subdevice = _subdev, \
33617 .driver_data = (unsigned long)&_driverdata }
33618
33619-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33620+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33621 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33622 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33623 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33624diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33625index a7d876f..8c21b61 100644
33626--- a/drivers/media/dvb/dvb-core/dvb_demux.h
33627+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33628@@ -73,7 +73,7 @@ struct dvb_demux_feed {
33629 union {
33630 dmx_ts_cb ts;
33631 dmx_section_cb sec;
33632- } cb;
33633+ } __no_const cb;
33634
33635 struct dvb_demux *demux;
33636 void *priv;
33637diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33638index 00a6732..70a682e 100644
33639--- a/drivers/media/dvb/dvb-core/dvbdev.c
33640+++ b/drivers/media/dvb/dvb-core/dvbdev.c
33641@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33642 const struct dvb_device *template, void *priv, int type)
33643 {
33644 struct dvb_device *dvbdev;
33645- struct file_operations *dvbdevfops;
33646+ file_operations_no_const *dvbdevfops;
33647 struct device *clsdev;
33648 int minor;
33649 int id;
33650diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33651index 3940bb0..fb3952a 100644
33652--- a/drivers/media/dvb/dvb-usb/cxusb.c
33653+++ b/drivers/media/dvb/dvb-usb/cxusb.c
33654@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33655
33656 struct dib0700_adapter_state {
33657 int (*set_param_save) (struct dvb_frontend *);
33658-};
33659+} __no_const;
33660
33661 static int dib7070_set_param_override(struct dvb_frontend *fe)
33662 {
33663diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33664index 451c5a7..649f711 100644
33665--- a/drivers/media/dvb/dvb-usb/dw2102.c
33666+++ b/drivers/media/dvb/dvb-usb/dw2102.c
33667@@ -95,7 +95,7 @@ struct su3000_state {
33668
33669 struct s6x0_state {
33670 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33671-};
33672+} __no_const;
33673
33674 /* debug */
33675 static int dvb_usb_dw2102_debug;
33676diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33677index 404f63a..4796533 100644
33678--- a/drivers/media/dvb/frontends/dib3000.h
33679+++ b/drivers/media/dvb/frontends/dib3000.h
33680@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33681 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33682 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33683 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33684-};
33685+} __no_const;
33686
33687 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33688 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33689diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33690index 8418c02..8555013 100644
33691--- a/drivers/media/dvb/ngene/ngene-cards.c
33692+++ b/drivers/media/dvb/ngene/ngene-cards.c
33693@@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
33694
33695 /****************************************************************************/
33696
33697-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33698+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33699 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33700 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33701 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33702diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33703index 16a089f..ab1667d 100644
33704--- a/drivers/media/radio/radio-cadet.c
33705+++ b/drivers/media/radio/radio-cadet.c
33706@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33707 unsigned char readbuf[RDS_BUFFER];
33708 int i = 0;
33709
33710+ if (count > RDS_BUFFER)
33711+ return -EFAULT;
33712 mutex_lock(&dev->lock);
33713 if (dev->rdsstat == 0) {
33714 dev->rdsstat = 1;
33715diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33716index 9cde353..8c6a1c3 100644
33717--- a/drivers/media/video/au0828/au0828.h
33718+++ b/drivers/media/video/au0828/au0828.h
33719@@ -191,7 +191,7 @@ struct au0828_dev {
33720
33721 /* I2C */
33722 struct i2c_adapter i2c_adap;
33723- struct i2c_algorithm i2c_algo;
33724+ i2c_algorithm_no_const i2c_algo;
33725 struct i2c_client i2c_client;
33726 u32 i2c_rc;
33727
33728diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
33729index ee91e295..04ad048 100644
33730--- a/drivers/media/video/cpia2/cpia2_core.c
33731+++ b/drivers/media/video/cpia2/cpia2_core.c
33732@@ -86,6 +86,7 @@ static inline unsigned long kvirt_to_pa(unsigned long adr)
33733 return ret;
33734 }
33735
33736+static void *rvmalloc(unsigned long size) __size_overflow(1);
33737 static void *rvmalloc(unsigned long size)
33738 {
33739 void *mem;
33740diff --git a/drivers/media/video/cx18/cx18-alsa-pcm.c b/drivers/media/video/cx18/cx18-alsa-pcm.c
33741index 82d195b..181103c 100644
33742--- a/drivers/media/video/cx18/cx18-alsa-pcm.c
33743+++ b/drivers/media/video/cx18/cx18-alsa-pcm.c
33744@@ -229,6 +229,8 @@ static int snd_cx18_pcm_ioctl(struct snd_pcm_substream *substream,
33745
33746
33747 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
33748+ size_t size) __size_overflow(2);
33749+static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
33750 size_t size)
33751 {
33752 struct snd_pcm_runtime *runtime = subs->runtime;
33753diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
33754index a2c2b7d..8f1bec7 100644
33755--- a/drivers/media/video/cx231xx/cx231xx-audio.c
33756+++ b/drivers/media/video/cx231xx/cx231xx-audio.c
33757@@ -389,6 +389,8 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
33758 }
33759
33760 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
33761+ size_t size) __size_overflow(2);
33762+static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
33763 size_t size)
33764 {
33765 struct snd_pcm_runtime *runtime = subs->runtime;
33766diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33767index 04bf662..e0ac026 100644
33768--- a/drivers/media/video/cx88/cx88-alsa.c
33769+++ b/drivers/media/video/cx88/cx88-alsa.c
33770@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33771 * Only boards with eeprom and byte 1 at eeprom=1 have it
33772 */
33773
33774-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33775+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33776 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33777 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33778 {0, }
33779diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
33780index e2a7b77..753d0ee 100644
33781--- a/drivers/media/video/em28xx/em28xx-audio.c
33782+++ b/drivers/media/video/em28xx/em28xx-audio.c
33783@@ -225,6 +225,8 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
33784 }
33785
33786 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
33787+ size_t size) __size_overflow(2);
33788+static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
33789 size_t size)
33790 {
33791 struct snd_pcm_runtime *runtime = subs->runtime;
33792diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
33793index b09a3c8..6dcba0a 100644
33794--- a/drivers/media/video/meye.c
33795+++ b/drivers/media/video/meye.c
33796@@ -72,6 +72,7 @@ static struct meye meye;
33797 /****************************************************************************/
33798 /* Memory allocation routines (stolen from bttv-driver.c) */
33799 /****************************************************************************/
33800+static void *rvmalloc(unsigned long size) __size_overflow(1);
33801 static void *rvmalloc(unsigned long size)
33802 {
33803 void *mem;
33804diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33805index 1fb7d5b..3901e77 100644
33806--- a/drivers/media/video/omap/omap_vout.c
33807+++ b/drivers/media/video/omap/omap_vout.c
33808@@ -64,7 +64,6 @@ enum omap_vout_channels {
33809 OMAP_VIDEO2,
33810 };
33811
33812-static struct videobuf_queue_ops video_vbq_ops;
33813 /* Variables configurable through module params*/
33814 static u32 video1_numbuffers = 3;
33815 static u32 video2_numbuffers = 3;
33816@@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
33817 {
33818 struct videobuf_queue *q;
33819 struct omap_vout_device *vout = NULL;
33820+ static struct videobuf_queue_ops video_vbq_ops = {
33821+ .buf_setup = omap_vout_buffer_setup,
33822+ .buf_prepare = omap_vout_buffer_prepare,
33823+ .buf_release = omap_vout_buffer_release,
33824+ .buf_queue = omap_vout_buffer_queue,
33825+ };
33826
33827 vout = video_drvdata(file);
33828 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
33829@@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
33830 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33831
33832 q = &vout->vbq;
33833- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33834- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33835- video_vbq_ops.buf_release = omap_vout_buffer_release;
33836- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33837 spin_lock_init(&vout->vbq_lock);
33838
33839 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
33840diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33841index 305e6aa..0143317 100644
33842--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33843+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33844@@ -196,7 +196,7 @@ struct pvr2_hdw {
33845
33846 /* I2C stuff */
33847 struct i2c_adapter i2c_adap;
33848- struct i2c_algorithm i2c_algo;
33849+ i2c_algorithm_no_const i2c_algo;
33850 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33851 int i2c_cx25840_hack_state;
33852 int i2c_linked;
33853diff --git a/drivers/media/video/saa7164/saa7164-encoder.c b/drivers/media/video/saa7164/saa7164-encoder.c
33854index 2fd38a0..ddec3c4 100644
33855--- a/drivers/media/video/saa7164/saa7164-encoder.c
33856+++ b/drivers/media/video/saa7164/saa7164-encoder.c
33857@@ -1136,6 +1136,8 @@ struct saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
33858 }
33859
33860 static ssize_t fops_read(struct file *file, char __user *buffer,
33861+ size_t count, loff_t *pos) __size_overflow(3);
33862+static ssize_t fops_read(struct file *file, char __user *buffer,
33863 size_t count, loff_t *pos)
33864 {
33865 struct saa7164_encoder_fh *fh = file->private_data;
33866diff --git a/drivers/media/video/saa7164/saa7164-vbi.c b/drivers/media/video/saa7164/saa7164-vbi.c
33867index e2e0341..b80056c 100644
33868--- a/drivers/media/video/saa7164/saa7164-vbi.c
33869+++ b/drivers/media/video/saa7164/saa7164-vbi.c
33870@@ -1081,6 +1081,8 @@ struct saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
33871 }
33872
33873 static ssize_t fops_read(struct file *file, char __user *buffer,
33874+ size_t count, loff_t *pos) __size_overflow(3);
33875+static ssize_t fops_read(struct file *file, char __user *buffer,
33876 size_t count, loff_t *pos)
33877 {
33878 struct saa7164_vbi_fh *fh = file->private_data;
33879diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33880index 4ed1c7c2..8f15e13 100644
33881--- a/drivers/media/video/timblogiw.c
33882+++ b/drivers/media/video/timblogiw.c
33883@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33884
33885 /* Platform device functions */
33886
33887-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33888+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33889 .vidioc_querycap = timblogiw_querycap,
33890 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33891 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33892@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33893 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33894 };
33895
33896-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33897+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33898 .owner = THIS_MODULE,
33899 .open = timblogiw_open,
33900 .release = timblogiw_close,
33901diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
33902index c969111..a7910f4 100644
33903--- a/drivers/media/video/videobuf-dma-contig.c
33904+++ b/drivers/media/video/videobuf-dma-contig.c
33905@@ -184,6 +184,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
33906 return ret;
33907 }
33908
33909+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
33910 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
33911 {
33912 struct videobuf_dma_contig_memory *mem;
33913diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
33914index f300dea..5fc9c4a 100644
33915--- a/drivers/media/video/videobuf-dma-sg.c
33916+++ b/drivers/media/video/videobuf-dma-sg.c
33917@@ -419,6 +419,7 @@ static const struct vm_operations_struct videobuf_vm_ops = {
33918 struct videobuf_dma_sg_memory
33919 */
33920
33921+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
33922 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
33923 {
33924 struct videobuf_dma_sg_memory *mem;
33925diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
33926index df14258..12cc7a3 100644
33927--- a/drivers/media/video/videobuf-vmalloc.c
33928+++ b/drivers/media/video/videobuf-vmalloc.c
33929@@ -135,6 +135,7 @@ static const struct vm_operations_struct videobuf_vm_ops = {
33930 struct videobuf_dma_sg_memory
33931 */
33932
33933+static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
33934 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
33935 {
33936 struct videobuf_vmalloc_memory *mem;
33937diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
33938index a7dc467..a55c423 100644
33939--- a/drivers/message/fusion/mptbase.c
33940+++ b/drivers/message/fusion/mptbase.c
33941@@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
33942 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33943 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33944
33945+#ifdef CONFIG_GRKERNSEC_HIDESYM
33946+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
33947+#else
33948 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33949 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33950+#endif
33951+
33952 /*
33953 * Rounding UP to nearest 4-kB boundary here...
33954 */
33955diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
33956index 551262e..7551198 100644
33957--- a/drivers/message/fusion/mptsas.c
33958+++ b/drivers/message/fusion/mptsas.c
33959@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
33960 return 0;
33961 }
33962
33963+static inline void
33964+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33965+{
33966+ if (phy_info->port_details) {
33967+ phy_info->port_details->rphy = rphy;
33968+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33969+ ioc->name, rphy));
33970+ }
33971+
33972+ if (rphy) {
33973+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33974+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33975+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33976+ ioc->name, rphy, rphy->dev.release));
33977+ }
33978+}
33979+
33980 /* no mutex */
33981 static void
33982 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
33983@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
33984 return NULL;
33985 }
33986
33987-static inline void
33988-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33989-{
33990- if (phy_info->port_details) {
33991- phy_info->port_details->rphy = rphy;
33992- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33993- ioc->name, rphy));
33994- }
33995-
33996- if (rphy) {
33997- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33998- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33999- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34000- ioc->name, rphy, rphy->dev.release));
34001- }
34002-}
34003-
34004 static inline struct sas_port *
34005 mptsas_get_port(struct mptsas_phyinfo *phy_info)
34006 {
34007diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
34008index 0c3ced7..1fe34ec 100644
34009--- a/drivers/message/fusion/mptscsih.c
34010+++ b/drivers/message/fusion/mptscsih.c
34011@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
34012
34013 h = shost_priv(SChost);
34014
34015- if (h) {
34016- if (h->info_kbuf == NULL)
34017- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34018- return h->info_kbuf;
34019- h->info_kbuf[0] = '\0';
34020+ if (!h)
34021+ return NULL;
34022
34023- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34024- h->info_kbuf[size-1] = '\0';
34025- }
34026+ if (h->info_kbuf == NULL)
34027+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34028+ return h->info_kbuf;
34029+ h->info_kbuf[0] = '\0';
34030+
34031+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34032+ h->info_kbuf[size-1] = '\0';
34033
34034 return h->info_kbuf;
34035 }
34036diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
34037index 6d115c7..58ff7fd 100644
34038--- a/drivers/message/i2o/i2o_proc.c
34039+++ b/drivers/message/i2o/i2o_proc.c
34040@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
34041 "Array Controller Device"
34042 };
34043
34044-static char *chtostr(u8 * chars, int n)
34045-{
34046- char tmp[256];
34047- tmp[0] = 0;
34048- return strncat(tmp, (char *)chars, n);
34049-}
34050-
34051 static int i2o_report_query_status(struct seq_file *seq, int block_status,
34052 char *group)
34053 {
34054@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
34055
34056 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
34057 seq_printf(seq, "%-#8x", ddm_table.module_id);
34058- seq_printf(seq, "%-29s",
34059- chtostr(ddm_table.module_name_version, 28));
34060+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
34061 seq_printf(seq, "%9d ", ddm_table.data_size);
34062 seq_printf(seq, "%8d", ddm_table.code_size);
34063
34064@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
34065
34066 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
34067 seq_printf(seq, "%-#8x", dst->module_id);
34068- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
34069- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
34070+ seq_printf(seq, "%-.28s", dst->module_name_version);
34071+ seq_printf(seq, "%-.8s", dst->date);
34072 seq_printf(seq, "%8d ", dst->module_size);
34073 seq_printf(seq, "%8d ", dst->mpb_size);
34074 seq_printf(seq, "0x%04x", dst->module_flags);
34075@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
34076 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
34077 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
34078 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
34079- seq_printf(seq, "Vendor info : %s\n",
34080- chtostr((u8 *) (work32 + 2), 16));
34081- seq_printf(seq, "Product info : %s\n",
34082- chtostr((u8 *) (work32 + 6), 16));
34083- seq_printf(seq, "Description : %s\n",
34084- chtostr((u8 *) (work32 + 10), 16));
34085- seq_printf(seq, "Product rev. : %s\n",
34086- chtostr((u8 *) (work32 + 14), 8));
34087+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
34088+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
34089+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
34090+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
34091
34092 seq_printf(seq, "Serial number : ");
34093 print_serial_number(seq, (u8 *) (work32 + 16),
34094@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
34095 }
34096
34097 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
34098- seq_printf(seq, "Module name : %s\n",
34099- chtostr(result.module_name, 24));
34100- seq_printf(seq, "Module revision : %s\n",
34101- chtostr(result.module_rev, 8));
34102+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
34103+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
34104
34105 seq_printf(seq, "Serial number : ");
34106 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
34107@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
34108 return 0;
34109 }
34110
34111- seq_printf(seq, "Device name : %s\n",
34112- chtostr(result.device_name, 64));
34113- seq_printf(seq, "Service name : %s\n",
34114- chtostr(result.service_name, 64));
34115- seq_printf(seq, "Physical name : %s\n",
34116- chtostr(result.physical_location, 64));
34117- seq_printf(seq, "Instance number : %s\n",
34118- chtostr(result.instance_number, 4));
34119+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
34120+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
34121+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
34122+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
34123
34124 return 0;
34125 }
34126diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
34127index a8c08f3..155fe3d 100644
34128--- a/drivers/message/i2o/iop.c
34129+++ b/drivers/message/i2o/iop.c
34130@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
34131
34132 spin_lock_irqsave(&c->context_list_lock, flags);
34133
34134- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
34135- atomic_inc(&c->context_list_counter);
34136+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
34137+ atomic_inc_unchecked(&c->context_list_counter);
34138
34139- entry->context = atomic_read(&c->context_list_counter);
34140+ entry->context = atomic_read_unchecked(&c->context_list_counter);
34141
34142 list_add(&entry->list, &c->context_list);
34143
34144@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
34145
34146 #if BITS_PER_LONG == 64
34147 spin_lock_init(&c->context_list_lock);
34148- atomic_set(&c->context_list_counter, 0);
34149+ atomic_set_unchecked(&c->context_list_counter, 0);
34150 INIT_LIST_HEAD(&c->context_list);
34151 #endif
34152
34153diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
34154index 7ce65f4..e66e9bc 100644
34155--- a/drivers/mfd/abx500-core.c
34156+++ b/drivers/mfd/abx500-core.c
34157@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
34158
34159 struct abx500_device_entry {
34160 struct list_head list;
34161- struct abx500_ops ops;
34162+ abx500_ops_no_const ops;
34163 struct device *dev;
34164 };
34165
34166diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
34167index a9223ed..4127b13 100644
34168--- a/drivers/mfd/janz-cmodio.c
34169+++ b/drivers/mfd/janz-cmodio.c
34170@@ -13,6 +13,7 @@
34171
34172 #include <linux/kernel.h>
34173 #include <linux/module.h>
34174+#include <linux/slab.h>
34175 #include <linux/init.h>
34176 #include <linux/pci.h>
34177 #include <linux/interrupt.h>
34178diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
34179index a981e2a..5ca0c8b 100644
34180--- a/drivers/misc/lis3lv02d/lis3lv02d.c
34181+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
34182@@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
34183 * the lid is closed. This leads to interrupts as soon as a little move
34184 * is done.
34185 */
34186- atomic_inc(&lis3->count);
34187+ atomic_inc_unchecked(&lis3->count);
34188
34189 wake_up_interruptible(&lis3->misc_wait);
34190 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
34191@@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34192 if (lis3->pm_dev)
34193 pm_runtime_get_sync(lis3->pm_dev);
34194
34195- atomic_set(&lis3->count, 0);
34196+ atomic_set_unchecked(&lis3->count, 0);
34197 return 0;
34198 }
34199
34200@@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34201 add_wait_queue(&lis3->misc_wait, &wait);
34202 while (true) {
34203 set_current_state(TASK_INTERRUPTIBLE);
34204- data = atomic_xchg(&lis3->count, 0);
34205+ data = atomic_xchg_unchecked(&lis3->count, 0);
34206 if (data)
34207 break;
34208
34209@@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34210 struct lis3lv02d, miscdev);
34211
34212 poll_wait(file, &lis3->misc_wait, wait);
34213- if (atomic_read(&lis3->count))
34214+ if (atomic_read_unchecked(&lis3->count))
34215 return POLLIN | POLLRDNORM;
34216 return 0;
34217 }
34218diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
34219index 2b1482a..5d33616 100644
34220--- a/drivers/misc/lis3lv02d/lis3lv02d.h
34221+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
34222@@ -266,7 +266,7 @@ struct lis3lv02d {
34223 struct input_polled_dev *idev; /* input device */
34224 struct platform_device *pdev; /* platform device */
34225 struct regulator_bulk_data regulators[2];
34226- atomic_t count; /* interrupt count after last read */
34227+ atomic_unchecked_t count; /* interrupt count after last read */
34228 union axis_conversion ac; /* hw -> logical axis */
34229 int mapped_btns[3];
34230
34231diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34232index 2f30bad..c4c13d0 100644
34233--- a/drivers/misc/sgi-gru/gruhandles.c
34234+++ b/drivers/misc/sgi-gru/gruhandles.c
34235@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34236 unsigned long nsec;
34237
34238 nsec = CLKS2NSEC(clks);
34239- atomic_long_inc(&mcs_op_statistics[op].count);
34240- atomic_long_add(nsec, &mcs_op_statistics[op].total);
34241+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34242+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34243 if (mcs_op_statistics[op].max < nsec)
34244 mcs_op_statistics[op].max = nsec;
34245 }
34246diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34247index 950dbe9..eeef0f8 100644
34248--- a/drivers/misc/sgi-gru/gruprocfs.c
34249+++ b/drivers/misc/sgi-gru/gruprocfs.c
34250@@ -32,9 +32,9 @@
34251
34252 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34253
34254-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34255+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34256 {
34257- unsigned long val = atomic_long_read(v);
34258+ unsigned long val = atomic_long_read_unchecked(v);
34259
34260 seq_printf(s, "%16lu %s\n", val, id);
34261 }
34262@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34263
34264 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34265 for (op = 0; op < mcsop_last; op++) {
34266- count = atomic_long_read(&mcs_op_statistics[op].count);
34267- total = atomic_long_read(&mcs_op_statistics[op].total);
34268+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34269+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34270 max = mcs_op_statistics[op].max;
34271 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34272 count ? total / count : 0, max);
34273diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34274index 5c3ce24..4915ccb 100644
34275--- a/drivers/misc/sgi-gru/grutables.h
34276+++ b/drivers/misc/sgi-gru/grutables.h
34277@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34278 * GRU statistics.
34279 */
34280 struct gru_stats_s {
34281- atomic_long_t vdata_alloc;
34282- atomic_long_t vdata_free;
34283- atomic_long_t gts_alloc;
34284- atomic_long_t gts_free;
34285- atomic_long_t gms_alloc;
34286- atomic_long_t gms_free;
34287- atomic_long_t gts_double_allocate;
34288- atomic_long_t assign_context;
34289- atomic_long_t assign_context_failed;
34290- atomic_long_t free_context;
34291- atomic_long_t load_user_context;
34292- atomic_long_t load_kernel_context;
34293- atomic_long_t lock_kernel_context;
34294- atomic_long_t unlock_kernel_context;
34295- atomic_long_t steal_user_context;
34296- atomic_long_t steal_kernel_context;
34297- atomic_long_t steal_context_failed;
34298- atomic_long_t nopfn;
34299- atomic_long_t asid_new;
34300- atomic_long_t asid_next;
34301- atomic_long_t asid_wrap;
34302- atomic_long_t asid_reuse;
34303- atomic_long_t intr;
34304- atomic_long_t intr_cbr;
34305- atomic_long_t intr_tfh;
34306- atomic_long_t intr_spurious;
34307- atomic_long_t intr_mm_lock_failed;
34308- atomic_long_t call_os;
34309- atomic_long_t call_os_wait_queue;
34310- atomic_long_t user_flush_tlb;
34311- atomic_long_t user_unload_context;
34312- atomic_long_t user_exception;
34313- atomic_long_t set_context_option;
34314- atomic_long_t check_context_retarget_intr;
34315- atomic_long_t check_context_unload;
34316- atomic_long_t tlb_dropin;
34317- atomic_long_t tlb_preload_page;
34318- atomic_long_t tlb_dropin_fail_no_asid;
34319- atomic_long_t tlb_dropin_fail_upm;
34320- atomic_long_t tlb_dropin_fail_invalid;
34321- atomic_long_t tlb_dropin_fail_range_active;
34322- atomic_long_t tlb_dropin_fail_idle;
34323- atomic_long_t tlb_dropin_fail_fmm;
34324- atomic_long_t tlb_dropin_fail_no_exception;
34325- atomic_long_t tfh_stale_on_fault;
34326- atomic_long_t mmu_invalidate_range;
34327- atomic_long_t mmu_invalidate_page;
34328- atomic_long_t flush_tlb;
34329- atomic_long_t flush_tlb_gru;
34330- atomic_long_t flush_tlb_gru_tgh;
34331- atomic_long_t flush_tlb_gru_zero_asid;
34332+ atomic_long_unchecked_t vdata_alloc;
34333+ atomic_long_unchecked_t vdata_free;
34334+ atomic_long_unchecked_t gts_alloc;
34335+ atomic_long_unchecked_t gts_free;
34336+ atomic_long_unchecked_t gms_alloc;
34337+ atomic_long_unchecked_t gms_free;
34338+ atomic_long_unchecked_t gts_double_allocate;
34339+ atomic_long_unchecked_t assign_context;
34340+ atomic_long_unchecked_t assign_context_failed;
34341+ atomic_long_unchecked_t free_context;
34342+ atomic_long_unchecked_t load_user_context;
34343+ atomic_long_unchecked_t load_kernel_context;
34344+ atomic_long_unchecked_t lock_kernel_context;
34345+ atomic_long_unchecked_t unlock_kernel_context;
34346+ atomic_long_unchecked_t steal_user_context;
34347+ atomic_long_unchecked_t steal_kernel_context;
34348+ atomic_long_unchecked_t steal_context_failed;
34349+ atomic_long_unchecked_t nopfn;
34350+ atomic_long_unchecked_t asid_new;
34351+ atomic_long_unchecked_t asid_next;
34352+ atomic_long_unchecked_t asid_wrap;
34353+ atomic_long_unchecked_t asid_reuse;
34354+ atomic_long_unchecked_t intr;
34355+ atomic_long_unchecked_t intr_cbr;
34356+ atomic_long_unchecked_t intr_tfh;
34357+ atomic_long_unchecked_t intr_spurious;
34358+ atomic_long_unchecked_t intr_mm_lock_failed;
34359+ atomic_long_unchecked_t call_os;
34360+ atomic_long_unchecked_t call_os_wait_queue;
34361+ atomic_long_unchecked_t user_flush_tlb;
34362+ atomic_long_unchecked_t user_unload_context;
34363+ atomic_long_unchecked_t user_exception;
34364+ atomic_long_unchecked_t set_context_option;
34365+ atomic_long_unchecked_t check_context_retarget_intr;
34366+ atomic_long_unchecked_t check_context_unload;
34367+ atomic_long_unchecked_t tlb_dropin;
34368+ atomic_long_unchecked_t tlb_preload_page;
34369+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34370+ atomic_long_unchecked_t tlb_dropin_fail_upm;
34371+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
34372+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
34373+ atomic_long_unchecked_t tlb_dropin_fail_idle;
34374+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
34375+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34376+ atomic_long_unchecked_t tfh_stale_on_fault;
34377+ atomic_long_unchecked_t mmu_invalidate_range;
34378+ atomic_long_unchecked_t mmu_invalidate_page;
34379+ atomic_long_unchecked_t flush_tlb;
34380+ atomic_long_unchecked_t flush_tlb_gru;
34381+ atomic_long_unchecked_t flush_tlb_gru_tgh;
34382+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34383
34384- atomic_long_t copy_gpa;
34385- atomic_long_t read_gpa;
34386+ atomic_long_unchecked_t copy_gpa;
34387+ atomic_long_unchecked_t read_gpa;
34388
34389- atomic_long_t mesq_receive;
34390- atomic_long_t mesq_receive_none;
34391- atomic_long_t mesq_send;
34392- atomic_long_t mesq_send_failed;
34393- atomic_long_t mesq_noop;
34394- atomic_long_t mesq_send_unexpected_error;
34395- atomic_long_t mesq_send_lb_overflow;
34396- atomic_long_t mesq_send_qlimit_reached;
34397- atomic_long_t mesq_send_amo_nacked;
34398- atomic_long_t mesq_send_put_nacked;
34399- atomic_long_t mesq_page_overflow;
34400- atomic_long_t mesq_qf_locked;
34401- atomic_long_t mesq_qf_noop_not_full;
34402- atomic_long_t mesq_qf_switch_head_failed;
34403- atomic_long_t mesq_qf_unexpected_error;
34404- atomic_long_t mesq_noop_unexpected_error;
34405- atomic_long_t mesq_noop_lb_overflow;
34406- atomic_long_t mesq_noop_qlimit_reached;
34407- atomic_long_t mesq_noop_amo_nacked;
34408- atomic_long_t mesq_noop_put_nacked;
34409- atomic_long_t mesq_noop_page_overflow;
34410+ atomic_long_unchecked_t mesq_receive;
34411+ atomic_long_unchecked_t mesq_receive_none;
34412+ atomic_long_unchecked_t mesq_send;
34413+ atomic_long_unchecked_t mesq_send_failed;
34414+ atomic_long_unchecked_t mesq_noop;
34415+ atomic_long_unchecked_t mesq_send_unexpected_error;
34416+ atomic_long_unchecked_t mesq_send_lb_overflow;
34417+ atomic_long_unchecked_t mesq_send_qlimit_reached;
34418+ atomic_long_unchecked_t mesq_send_amo_nacked;
34419+ atomic_long_unchecked_t mesq_send_put_nacked;
34420+ atomic_long_unchecked_t mesq_page_overflow;
34421+ atomic_long_unchecked_t mesq_qf_locked;
34422+ atomic_long_unchecked_t mesq_qf_noop_not_full;
34423+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
34424+ atomic_long_unchecked_t mesq_qf_unexpected_error;
34425+ atomic_long_unchecked_t mesq_noop_unexpected_error;
34426+ atomic_long_unchecked_t mesq_noop_lb_overflow;
34427+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
34428+ atomic_long_unchecked_t mesq_noop_amo_nacked;
34429+ atomic_long_unchecked_t mesq_noop_put_nacked;
34430+ atomic_long_unchecked_t mesq_noop_page_overflow;
34431
34432 };
34433
34434@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34435 tghop_invalidate, mcsop_last};
34436
34437 struct mcs_op_statistic {
34438- atomic_long_t count;
34439- atomic_long_t total;
34440+ atomic_long_unchecked_t count;
34441+ atomic_long_unchecked_t total;
34442 unsigned long max;
34443 };
34444
34445@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34446
34447 #define STAT(id) do { \
34448 if (gru_options & OPT_STATS) \
34449- atomic_long_inc(&gru_stats.id); \
34450+ atomic_long_inc_unchecked(&gru_stats.id); \
34451 } while (0)
34452
34453 #ifdef CONFIG_SGI_GRU_DEBUG
34454diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34455index 851b2f2..a4ec097 100644
34456--- a/drivers/misc/sgi-xp/xp.h
34457+++ b/drivers/misc/sgi-xp/xp.h
34458@@ -289,7 +289,7 @@ struct xpc_interface {
34459 xpc_notify_func, void *);
34460 void (*received) (short, int, void *);
34461 enum xp_retval (*partid_to_nasids) (short, void *);
34462-};
34463+} __no_const;
34464
34465 extern struct xpc_interface xpc_interface;
34466
34467diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34468index b94d5f7..7f494c5 100644
34469--- a/drivers/misc/sgi-xp/xpc.h
34470+++ b/drivers/misc/sgi-xp/xpc.h
34471@@ -835,6 +835,7 @@ struct xpc_arch_operations {
34472 void (*received_payload) (struct xpc_channel *, void *);
34473 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34474 };
34475+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34476
34477 /* struct xpc_partition act_state values (for XPC HB) */
34478
34479@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34480 /* found in xpc_main.c */
34481 extern struct device *xpc_part;
34482 extern struct device *xpc_chan;
34483-extern struct xpc_arch_operations xpc_arch_ops;
34484+extern xpc_arch_operations_no_const xpc_arch_ops;
34485 extern int xpc_disengage_timelimit;
34486 extern int xpc_disengage_timedout;
34487 extern int xpc_activate_IRQ_rcvd;
34488diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34489index 8d082b4..aa749ae 100644
34490--- a/drivers/misc/sgi-xp/xpc_main.c
34491+++ b/drivers/misc/sgi-xp/xpc_main.c
34492@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34493 .notifier_call = xpc_system_die,
34494 };
34495
34496-struct xpc_arch_operations xpc_arch_ops;
34497+xpc_arch_operations_no_const xpc_arch_ops;
34498
34499 /*
34500 * Timer function to enforce the timelimit on the partition disengage.
34501diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34502index 6ebdc40..9edf5d8 100644
34503--- a/drivers/mmc/host/sdhci-pci.c
34504+++ b/drivers/mmc/host/sdhci-pci.c
34505@@ -631,7 +631,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34506 .probe = via_probe,
34507 };
34508
34509-static const struct pci_device_id pci_ids[] __devinitdata = {
34510+static const struct pci_device_id pci_ids[] __devinitconst = {
34511 {
34512 .vendor = PCI_VENDOR_ID_RICOH,
34513 .device = PCI_DEVICE_ID_RICOH_R5C822,
34514diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34515index b1cdf64..ce6e438 100644
34516--- a/drivers/mtd/devices/doc2000.c
34517+++ b/drivers/mtd/devices/doc2000.c
34518@@ -764,7 +764,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34519
34520 /* The ECC will not be calculated correctly if less than 512 is written */
34521 /* DBB-
34522- if (len != 0x200 && eccbuf)
34523+ if (len != 0x200)
34524 printk(KERN_WARNING
34525 "ECC needs a full sector write (adr: %lx size %lx)\n",
34526 (long) to, (long) len);
34527diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
34528index 7543b98..7069947 100644
34529--- a/drivers/mtd/devices/doc2001.c
34530+++ b/drivers/mtd/devices/doc2001.c
34531@@ -384,7 +384,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
34532 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
34533
34534 /* Don't allow read past end of device */
34535- if (from >= this->totlen)
34536+ if (from >= this->totlen || !len)
34537 return -EINVAL;
34538
34539 /* Don't allow a single read to cross a 512-byte block boundary */
34540diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34541index 3984d48..28aa897 100644
34542--- a/drivers/mtd/nand/denali.c
34543+++ b/drivers/mtd/nand/denali.c
34544@@ -26,6 +26,7 @@
34545 #include <linux/pci.h>
34546 #include <linux/mtd/mtd.h>
34547 #include <linux/module.h>
34548+#include <linux/slab.h>
34549
34550 #include "denali.h"
34551
34552diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34553index 51b9d6a..52af9a7 100644
34554--- a/drivers/mtd/nftlmount.c
34555+++ b/drivers/mtd/nftlmount.c
34556@@ -24,6 +24,7 @@
34557 #include <asm/errno.h>
34558 #include <linux/delay.h>
34559 #include <linux/slab.h>
34560+#include <linux/sched.h>
34561 #include <linux/mtd/mtd.h>
34562 #include <linux/mtd/nand.h>
34563 #include <linux/mtd/nftl.h>
34564diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
34565index e2cdebf..d48183a 100644
34566--- a/drivers/mtd/ubi/debug.c
34567+++ b/drivers/mtd/ubi/debug.c
34568@@ -338,6 +338,8 @@ out:
34569
34570 /* Write an UBI debugfs file */
34571 static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
34572+ size_t count, loff_t *ppos) __size_overflow(3);
34573+static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
34574 size_t count, loff_t *ppos)
34575 {
34576 unsigned long ubi_num = (unsigned long)file->private_data;
34577diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34578index 071f4c8..440862e 100644
34579--- a/drivers/net/ethernet/atheros/atlx/atl2.c
34580+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34581@@ -2862,7 +2862,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34582 */
34583
34584 #define ATL2_PARAM(X, desc) \
34585- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34586+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34587 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34588 MODULE_PARM_DESC(X, desc);
34589 #else
34590diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34591index 66da39f..5dc436d 100644
34592--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34593+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34594@@ -473,7 +473,7 @@ struct bnx2x_rx_mode_obj {
34595
34596 int (*wait_comp)(struct bnx2x *bp,
34597 struct bnx2x_rx_mode_ramrod_params *p);
34598-};
34599+} __no_const;
34600
34601 /********************** Set multicast group ***********************************/
34602
34603diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34604index aea8f72..fcebf75 100644
34605--- a/drivers/net/ethernet/broadcom/tg3.h
34606+++ b/drivers/net/ethernet/broadcom/tg3.h
34607@@ -140,6 +140,7 @@
34608 #define CHIPREV_ID_5750_A0 0x4000
34609 #define CHIPREV_ID_5750_A1 0x4001
34610 #define CHIPREV_ID_5750_A3 0x4003
34611+#define CHIPREV_ID_5750_C1 0x4201
34612 #define CHIPREV_ID_5750_C2 0x4202
34613 #define CHIPREV_ID_5752_A0_HW 0x5000
34614 #define CHIPREV_ID_5752_A0 0x6000
34615diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
34616index 47a8435..248e4b3 100644
34617--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
34618+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
34619@@ -1052,6 +1052,8 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
34620 * be copied but there is no memory for the copy.
34621 */
34622 static inline struct sk_buff *get_packet(struct pci_dev *pdev,
34623+ struct freelQ *fl, unsigned int len) __size_overflow(3);
34624+static inline struct sk_buff *get_packet(struct pci_dev *pdev,
34625 struct freelQ *fl, unsigned int len)
34626 {
34627 struct sk_buff *skb;
34628diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34629index c4e8643..0979484 100644
34630--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34631+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34632@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34633 */
34634 struct l2t_skb_cb {
34635 arp_failure_handler_func arp_failure_handler;
34636-};
34637+} __no_const;
34638
34639 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34640
34641diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
34642index cfb60e1..94af340 100644
34643--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
34644+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
34645@@ -611,6 +611,8 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
34646 * of the SW ring.
34647 */
34648 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
34649+ size_t sw_size, dma_addr_t * phys, void *metadata) __size_overflow(2,4);
34650+static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
34651 size_t sw_size, dma_addr_t * phys, void *metadata)
34652 {
34653 size_t len = nelem * elem_size;
34654@@ -777,6 +779,8 @@ static inline unsigned int flits_to_desc(unsigned int n)
34655 * be copied but there is no memory for the copy.
34656 */
34657 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
34658+ unsigned int len, unsigned int drop_thres) __size_overflow(3);
34659+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
34660 unsigned int len, unsigned int drop_thres)
34661 {
34662 struct sk_buff *skb = NULL;
34663diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
34664index 2dae795..73037d2 100644
34665--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
34666+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
34667@@ -593,6 +593,9 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
34668 */
34669 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
34670 size_t sw_size, dma_addr_t *phys, void *metadata,
34671+ size_t stat_size, int node) __size_overflow(2,4);
34672+static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
34673+ size_t sw_size, dma_addr_t *phys, void *metadata,
34674 size_t stat_size, int node)
34675 {
34676 size_t len = nelem * elem_size + stat_size;
34677diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
34678index 0bd585b..d954ca5 100644
34679--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
34680+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
34681@@ -729,6 +729,9 @@ static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
34682 */
34683 static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
34684 size_t swsize, dma_addr_t *busaddrp, void *swringp,
34685+ size_t stat_size) __size_overflow(2,4);
34686+static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
34687+ size_t swsize, dma_addr_t *busaddrp, void *swringp,
34688 size_t stat_size)
34689 {
34690 /*
34691diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34692index 4d71f5a..8004440 100644
34693--- a/drivers/net/ethernet/dec/tulip/de4x5.c
34694+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34695@@ -5392,7 +5392,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34696 for (i=0; i<ETH_ALEN; i++) {
34697 tmp.addr[i] = dev->dev_addr[i];
34698 }
34699- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34700+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34701 break;
34702
34703 case DE4X5_SET_HWADDR: /* Set the hardware address */
34704@@ -5432,7 +5432,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34705 spin_lock_irqsave(&lp->lock, flags);
34706 memcpy(&statbuf, &lp->pktStats, ioc->len);
34707 spin_unlock_irqrestore(&lp->lock, flags);
34708- if (copy_to_user(ioc->data, &statbuf, ioc->len))
34709+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34710 return -EFAULT;
34711 break;
34712 }
34713diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34714index 14d5b61..1398636 100644
34715--- a/drivers/net/ethernet/dec/tulip/eeprom.c
34716+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34717@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34718 {NULL}};
34719
34720
34721-static const char *block_name[] __devinitdata = {
34722+static const char *block_name[] __devinitconst = {
34723 "21140 non-MII",
34724 "21140 MII PHY",
34725 "21142 Serial PHY",
34726diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34727index 52da7b2..4ddfe1c 100644
34728--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34729+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34730@@ -236,7 +236,7 @@ struct pci_id_info {
34731 int drv_flags; /* Driver use, intended as capability flags. */
34732 };
34733
34734-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34735+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34736 { /* Sometime a Level-One switch card. */
34737 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34738 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34739diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34740index 28a3a9b..d96cb63 100644
34741--- a/drivers/net/ethernet/dlink/sundance.c
34742+++ b/drivers/net/ethernet/dlink/sundance.c
34743@@ -218,7 +218,7 @@ enum {
34744 struct pci_id_info {
34745 const char *name;
34746 };
34747-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34748+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34749 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34750 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34751 {"D-Link DFE-580TX 4 port Server Adapter"},
34752diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34753index e703d64..d62ecf9 100644
34754--- a/drivers/net/ethernet/emulex/benet/be_main.c
34755+++ b/drivers/net/ethernet/emulex/benet/be_main.c
34756@@ -402,7 +402,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34757
34758 if (wrapped)
34759 newacc += 65536;
34760- ACCESS_ONCE(*acc) = newacc;
34761+ ACCESS_ONCE_RW(*acc) = newacc;
34762 }
34763
34764 void be_parse_stats(struct be_adapter *adapter)
34765diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34766index 47f85c3..82ab6c4 100644
34767--- a/drivers/net/ethernet/faraday/ftgmac100.c
34768+++ b/drivers/net/ethernet/faraday/ftgmac100.c
34769@@ -31,6 +31,8 @@
34770 #include <linux/netdevice.h>
34771 #include <linux/phy.h>
34772 #include <linux/platform_device.h>
34773+#include <linux/interrupt.h>
34774+#include <linux/irqreturn.h>
34775 #include <net/ip.h>
34776
34777 #include "ftgmac100.h"
34778diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34779index bb336a0..4b472da 100644
34780--- a/drivers/net/ethernet/faraday/ftmac100.c
34781+++ b/drivers/net/ethernet/faraday/ftmac100.c
34782@@ -31,6 +31,8 @@
34783 #include <linux/module.h>
34784 #include <linux/netdevice.h>
34785 #include <linux/platform_device.h>
34786+#include <linux/interrupt.h>
34787+#include <linux/irqreturn.h>
34788
34789 #include "ftmac100.h"
34790
34791diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34792index c82d444..0007fb4 100644
34793--- a/drivers/net/ethernet/fealnx.c
34794+++ b/drivers/net/ethernet/fealnx.c
34795@@ -150,7 +150,7 @@ struct chip_info {
34796 int flags;
34797 };
34798
34799-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34800+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34801 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34802 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34803 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34804diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34805index e1159e5..e18684d 100644
34806--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34807+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
34808@@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
34809 {
34810 struct e1000_hw *hw = &adapter->hw;
34811 struct e1000_mac_info *mac = &hw->mac;
34812- struct e1000_mac_operations *func = &mac->ops;
34813+ e1000_mac_operations_no_const *func = &mac->ops;
34814
34815 /* Set media type */
34816 switch (adapter->pdev->device) {
34817diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
34818index a3e65fd..f451444 100644
34819--- a/drivers/net/ethernet/intel/e1000e/82571.c
34820+++ b/drivers/net/ethernet/intel/e1000e/82571.c
34821@@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
34822 {
34823 struct e1000_hw *hw = &adapter->hw;
34824 struct e1000_mac_info *mac = &hw->mac;
34825- struct e1000_mac_operations *func = &mac->ops;
34826+ e1000_mac_operations_no_const *func = &mac->ops;
34827 u32 swsm = 0;
34828 u32 swsm2 = 0;
34829 bool force_clear_smbi = false;
34830diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34831index 2967039..ca8c40c 100644
34832--- a/drivers/net/ethernet/intel/e1000e/hw.h
34833+++ b/drivers/net/ethernet/intel/e1000e/hw.h
34834@@ -778,6 +778,7 @@ struct e1000_mac_operations {
34835 void (*write_vfta)(struct e1000_hw *, u32, u32);
34836 s32 (*read_mac_addr)(struct e1000_hw *);
34837 };
34838+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34839
34840 /*
34841 * When to use various PHY register access functions:
34842@@ -818,6 +819,7 @@ struct e1000_phy_operations {
34843 void (*power_up)(struct e1000_hw *);
34844 void (*power_down)(struct e1000_hw *);
34845 };
34846+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34847
34848 /* Function pointers for the NVM. */
34849 struct e1000_nvm_operations {
34850@@ -829,9 +831,10 @@ struct e1000_nvm_operations {
34851 s32 (*validate)(struct e1000_hw *);
34852 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34853 };
34854+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34855
34856 struct e1000_mac_info {
34857- struct e1000_mac_operations ops;
34858+ e1000_mac_operations_no_const ops;
34859 u8 addr[ETH_ALEN];
34860 u8 perm_addr[ETH_ALEN];
34861
34862@@ -872,7 +875,7 @@ struct e1000_mac_info {
34863 };
34864
34865 struct e1000_phy_info {
34866- struct e1000_phy_operations ops;
34867+ e1000_phy_operations_no_const ops;
34868
34869 enum e1000_phy_type type;
34870
34871@@ -906,7 +909,7 @@ struct e1000_phy_info {
34872 };
34873
34874 struct e1000_nvm_info {
34875- struct e1000_nvm_operations ops;
34876+ e1000_nvm_operations_no_const ops;
34877
34878 enum e1000_nvm_type type;
34879 enum e1000_nvm_override override;
34880diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34881index f67cbd3..cef9e3d 100644
34882--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34883+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34884@@ -314,6 +314,7 @@ struct e1000_mac_operations {
34885 s32 (*read_mac_addr)(struct e1000_hw *);
34886 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34887 };
34888+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34889
34890 struct e1000_phy_operations {
34891 s32 (*acquire)(struct e1000_hw *);
34892@@ -330,6 +331,7 @@ struct e1000_phy_operations {
34893 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34894 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34895 };
34896+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34897
34898 struct e1000_nvm_operations {
34899 s32 (*acquire)(struct e1000_hw *);
34900@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34901 s32 (*update)(struct e1000_hw *);
34902 s32 (*validate)(struct e1000_hw *);
34903 };
34904+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34905
34906 struct e1000_info {
34907 s32 (*get_invariants)(struct e1000_hw *);
34908@@ -350,7 +353,7 @@ struct e1000_info {
34909 extern const struct e1000_info e1000_82575_info;
34910
34911 struct e1000_mac_info {
34912- struct e1000_mac_operations ops;
34913+ e1000_mac_operations_no_const ops;
34914
34915 u8 addr[6];
34916 u8 perm_addr[6];
34917@@ -388,7 +391,7 @@ struct e1000_mac_info {
34918 };
34919
34920 struct e1000_phy_info {
34921- struct e1000_phy_operations ops;
34922+ e1000_phy_operations_no_const ops;
34923
34924 enum e1000_phy_type type;
34925
34926@@ -423,7 +426,7 @@ struct e1000_phy_info {
34927 };
34928
34929 struct e1000_nvm_info {
34930- struct e1000_nvm_operations ops;
34931+ e1000_nvm_operations_no_const ops;
34932 enum e1000_nvm_type type;
34933 enum e1000_nvm_override override;
34934
34935@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34936 s32 (*check_for_ack)(struct e1000_hw *, u16);
34937 s32 (*check_for_rst)(struct e1000_hw *, u16);
34938 };
34939+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34940
34941 struct e1000_mbx_stats {
34942 u32 msgs_tx;
34943@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34944 };
34945
34946 struct e1000_mbx_info {
34947- struct e1000_mbx_operations ops;
34948+ e1000_mbx_operations_no_const ops;
34949 struct e1000_mbx_stats stats;
34950 u32 timeout;
34951 u32 usec_delay;
34952diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34953index 57db3c6..aa825fc 100644
34954--- a/drivers/net/ethernet/intel/igbvf/vf.h
34955+++ b/drivers/net/ethernet/intel/igbvf/vf.h
34956@@ -189,9 +189,10 @@ struct e1000_mac_operations {
34957 s32 (*read_mac_addr)(struct e1000_hw *);
34958 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34959 };
34960+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34961
34962 struct e1000_mac_info {
34963- struct e1000_mac_operations ops;
34964+ e1000_mac_operations_no_const ops;
34965 u8 addr[6];
34966 u8 perm_addr[6];
34967
34968@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34969 s32 (*check_for_ack)(struct e1000_hw *);
34970 s32 (*check_for_rst)(struct e1000_hw *);
34971 };
34972+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34973
34974 struct e1000_mbx_stats {
34975 u32 msgs_tx;
34976@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34977 };
34978
34979 struct e1000_mbx_info {
34980- struct e1000_mbx_operations ops;
34981+ e1000_mbx_operations_no_const ops;
34982 struct e1000_mbx_stats stats;
34983 u32 timeout;
34984 u32 usec_delay;
34985diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34986index 9b95bef..7e254ee 100644
34987--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34988+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34989@@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
34990 s32 (*update_checksum)(struct ixgbe_hw *);
34991 u16 (*calc_checksum)(struct ixgbe_hw *);
34992 };
34993+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34994
34995 struct ixgbe_mac_operations {
34996 s32 (*init_hw)(struct ixgbe_hw *);
34997@@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
34998 /* Manageability interface */
34999 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
35000 };
35001+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35002
35003 struct ixgbe_phy_operations {
35004 s32 (*identify)(struct ixgbe_hw *);
35005@@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
35006 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
35007 s32 (*check_overtemp)(struct ixgbe_hw *);
35008 };
35009+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
35010
35011 struct ixgbe_eeprom_info {
35012- struct ixgbe_eeprom_operations ops;
35013+ ixgbe_eeprom_operations_no_const ops;
35014 enum ixgbe_eeprom_type type;
35015 u32 semaphore_delay;
35016 u16 word_size;
35017@@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
35018
35019 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
35020 struct ixgbe_mac_info {
35021- struct ixgbe_mac_operations ops;
35022+ ixgbe_mac_operations_no_const ops;
35023 enum ixgbe_mac_type type;
35024 u8 addr[ETH_ALEN];
35025 u8 perm_addr[ETH_ALEN];
35026@@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
35027 };
35028
35029 struct ixgbe_phy_info {
35030- struct ixgbe_phy_operations ops;
35031+ ixgbe_phy_operations_no_const ops;
35032 struct mdio_if_info mdio;
35033 enum ixgbe_phy_type type;
35034 u32 id;
35035@@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
35036 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
35037 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
35038 };
35039+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35040
35041 struct ixgbe_mbx_stats {
35042 u32 msgs_tx;
35043@@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
35044 };
35045
35046 struct ixgbe_mbx_info {
35047- struct ixgbe_mbx_operations ops;
35048+ ixgbe_mbx_operations_no_const ops;
35049 struct ixgbe_mbx_stats stats;
35050 u32 timeout;
35051 u32 usec_delay;
35052diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
35053index 25c951d..cc7cf33 100644
35054--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
35055+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
35056@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
35057 s32 (*clear_vfta)(struct ixgbe_hw *);
35058 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
35059 };
35060+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35061
35062 enum ixgbe_mac_type {
35063 ixgbe_mac_unknown = 0,
35064@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
35065 };
35066
35067 struct ixgbe_mac_info {
35068- struct ixgbe_mac_operations ops;
35069+ ixgbe_mac_operations_no_const ops;
35070 u8 addr[6];
35071 u8 perm_addr[6];
35072
35073@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
35074 s32 (*check_for_ack)(struct ixgbe_hw *);
35075 s32 (*check_for_rst)(struct ixgbe_hw *);
35076 };
35077+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35078
35079 struct ixgbe_mbx_stats {
35080 u32 msgs_tx;
35081@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
35082 };
35083
35084 struct ixgbe_mbx_info {
35085- struct ixgbe_mbx_operations ops;
35086+ ixgbe_mbx_operations_no_const ops;
35087 struct ixgbe_mbx_stats stats;
35088 u32 timeout;
35089 u32 udelay;
35090diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
35091index 8bf22b6..7f5baaa 100644
35092--- a/drivers/net/ethernet/mellanox/mlx4/main.c
35093+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
35094@@ -41,6 +41,7 @@
35095 #include <linux/slab.h>
35096 #include <linux/io-mapping.h>
35097 #include <linux/delay.h>
35098+#include <linux/sched.h>
35099
35100 #include <linux/mlx4/device.h>
35101 #include <linux/mlx4/doorbell.h>
35102diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35103index 5046a64..71ca936 100644
35104--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
35105+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35106@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
35107 void (*link_down)(struct __vxge_hw_device *devh);
35108 void (*crit_err)(struct __vxge_hw_device *devh,
35109 enum vxge_hw_event type, u64 ext_data);
35110-};
35111+} __no_const;
35112
35113 /*
35114 * struct __vxge_hw_blockpool_entry - Block private data structure
35115diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35116index 4a518a3..936b334 100644
35117--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35118+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35119@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
35120 struct vxge_hw_mempool_dma *dma_object,
35121 u32 index,
35122 u32 is_last);
35123-};
35124+} __no_const;
35125
35126 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
35127 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
35128diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
35129index bbacb37..d60887d 100644
35130--- a/drivers/net/ethernet/realtek/r8169.c
35131+++ b/drivers/net/ethernet/realtek/r8169.c
35132@@ -695,17 +695,17 @@ struct rtl8169_private {
35133 struct mdio_ops {
35134 void (*write)(void __iomem *, int, int);
35135 int (*read)(void __iomem *, int);
35136- } mdio_ops;
35137+ } __no_const mdio_ops;
35138
35139 struct pll_power_ops {
35140 void (*down)(struct rtl8169_private *);
35141 void (*up)(struct rtl8169_private *);
35142- } pll_power_ops;
35143+ } __no_const pll_power_ops;
35144
35145 struct jumbo_ops {
35146 void (*enable)(struct rtl8169_private *);
35147 void (*disable)(struct rtl8169_private *);
35148- } jumbo_ops;
35149+ } __no_const jumbo_ops;
35150
35151 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
35152 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
35153diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
35154index 5b118cd..858b523 100644
35155--- a/drivers/net/ethernet/sis/sis190.c
35156+++ b/drivers/net/ethernet/sis/sis190.c
35157@@ -1622,7 +1622,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
35158 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
35159 struct net_device *dev)
35160 {
35161- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
35162+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
35163 struct sis190_private *tp = netdev_priv(dev);
35164 struct pci_dev *isa_bridge;
35165 u8 reg, tmp8;
35166diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35167index c07cfe9..81cbf7e 100644
35168--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35169+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35170@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
35171
35172 writel(value, ioaddr + MMC_CNTRL);
35173
35174- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35175- MMC_CNTRL, value);
35176+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35177+// MMC_CNTRL, value);
35178 }
35179
35180 /* To mask all all interrupts.*/
35181diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
35182index dec5836..6d4db7d 100644
35183--- a/drivers/net/hyperv/hyperv_net.h
35184+++ b/drivers/net/hyperv/hyperv_net.h
35185@@ -97,7 +97,7 @@ struct rndis_device {
35186
35187 enum rndis_device_state state;
35188 bool link_state;
35189- atomic_t new_req_id;
35190+ atomic_unchecked_t new_req_id;
35191
35192 spinlock_t request_lock;
35193 struct list_head req_list;
35194diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
35195index 133b7fb..d58c559 100644
35196--- a/drivers/net/hyperv/rndis_filter.c
35197+++ b/drivers/net/hyperv/rndis_filter.c
35198@@ -96,7 +96,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35199 * template
35200 */
35201 set = &rndis_msg->msg.set_req;
35202- set->req_id = atomic_inc_return(&dev->new_req_id);
35203+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35204
35205 /* Add to the request list */
35206 spin_lock_irqsave(&dev->request_lock, flags);
35207@@ -627,7 +627,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35208
35209 /* Setup the rndis set */
35210 halt = &request->request_msg.msg.halt_req;
35211- halt->req_id = atomic_inc_return(&dev->new_req_id);
35212+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35213
35214 /* Ignore return since this msg is optional. */
35215 rndis_filter_send_request(dev, request);
35216diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
35217index 486b404..0d6677d 100644
35218--- a/drivers/net/ppp/ppp_generic.c
35219+++ b/drivers/net/ppp/ppp_generic.c
35220@@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35221 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
35222 struct ppp_stats stats;
35223 struct ppp_comp_stats cstats;
35224- char *vers;
35225
35226 switch (cmd) {
35227 case SIOCGPPPSTATS:
35228@@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35229 break;
35230
35231 case SIOCGPPPVER:
35232- vers = PPP_VERSION;
35233- if (copy_to_user(addr, vers, strlen(vers) + 1))
35234+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
35235 break;
35236 err = 0;
35237 break;
35238diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
35239index 515f122..41dd273 100644
35240--- a/drivers/net/tokenring/abyss.c
35241+++ b/drivers/net/tokenring/abyss.c
35242@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
35243
35244 static int __init abyss_init (void)
35245 {
35246- abyss_netdev_ops = tms380tr_netdev_ops;
35247+ pax_open_kernel();
35248+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35249
35250- abyss_netdev_ops.ndo_open = abyss_open;
35251- abyss_netdev_ops.ndo_stop = abyss_close;
35252+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
35253+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
35254+ pax_close_kernel();
35255
35256 return pci_register_driver(&abyss_driver);
35257 }
35258diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
35259index 6153cfd..cf69c1c 100644
35260--- a/drivers/net/tokenring/madgemc.c
35261+++ b/drivers/net/tokenring/madgemc.c
35262@@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
35263
35264 static int __init madgemc_init (void)
35265 {
35266- madgemc_netdev_ops = tms380tr_netdev_ops;
35267- madgemc_netdev_ops.ndo_open = madgemc_open;
35268- madgemc_netdev_ops.ndo_stop = madgemc_close;
35269+ pax_open_kernel();
35270+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35271+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
35272+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
35273+ pax_close_kernel();
35274
35275 return mca_register_driver (&madgemc_driver);
35276 }
35277diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
35278index 8d362e6..f91cc52 100644
35279--- a/drivers/net/tokenring/proteon.c
35280+++ b/drivers/net/tokenring/proteon.c
35281@@ -353,9 +353,11 @@ static int __init proteon_init(void)
35282 struct platform_device *pdev;
35283 int i, num = 0, err = 0;
35284
35285- proteon_netdev_ops = tms380tr_netdev_ops;
35286- proteon_netdev_ops.ndo_open = proteon_open;
35287- proteon_netdev_ops.ndo_stop = tms380tr_close;
35288+ pax_open_kernel();
35289+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35290+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35291+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35292+ pax_close_kernel();
35293
35294 err = platform_driver_register(&proteon_driver);
35295 if (err)
35296diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
35297index 46db5c5..37c1536 100644
35298--- a/drivers/net/tokenring/skisa.c
35299+++ b/drivers/net/tokenring/skisa.c
35300@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
35301 struct platform_device *pdev;
35302 int i, num = 0, err = 0;
35303
35304- sk_isa_netdev_ops = tms380tr_netdev_ops;
35305- sk_isa_netdev_ops.ndo_open = sk_isa_open;
35306- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35307+ pax_open_kernel();
35308+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35309+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35310+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35311+ pax_close_kernel();
35312
35313 err = platform_driver_register(&sk_isa_driver);
35314 if (err)
35315diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
35316index e1324b4..e1b0041 100644
35317--- a/drivers/net/usb/hso.c
35318+++ b/drivers/net/usb/hso.c
35319@@ -71,7 +71,7 @@
35320 #include <asm/byteorder.h>
35321 #include <linux/serial_core.h>
35322 #include <linux/serial.h>
35323-
35324+#include <asm/local.h>
35325
35326 #define MOD_AUTHOR "Option Wireless"
35327 #define MOD_DESCRIPTION "USB High Speed Option driver"
35328@@ -257,7 +257,7 @@ struct hso_serial {
35329
35330 /* from usb_serial_port */
35331 struct tty_struct *tty;
35332- int open_count;
35333+ local_t open_count;
35334 spinlock_t serial_lock;
35335
35336 int (*write_data) (struct hso_serial *serial);
35337@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35338 struct urb *urb;
35339
35340 urb = serial->rx_urb[0];
35341- if (serial->open_count > 0) {
35342+ if (local_read(&serial->open_count) > 0) {
35343 count = put_rxbuf_data(urb, serial);
35344 if (count == -1)
35345 return;
35346@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35347 DUMP1(urb->transfer_buffer, urb->actual_length);
35348
35349 /* Anyone listening? */
35350- if (serial->open_count == 0)
35351+ if (local_read(&serial->open_count) == 0)
35352 return;
35353
35354 if (status == 0) {
35355@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35356 spin_unlock_irq(&serial->serial_lock);
35357
35358 /* check for port already opened, if not set the termios */
35359- serial->open_count++;
35360- if (serial->open_count == 1) {
35361+ if (local_inc_return(&serial->open_count) == 1) {
35362 serial->rx_state = RX_IDLE;
35363 /* Force default termio settings */
35364 _hso_serial_set_termios(tty, NULL);
35365@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35366 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35367 if (result) {
35368 hso_stop_serial_device(serial->parent);
35369- serial->open_count--;
35370+ local_dec(&serial->open_count);
35371 kref_put(&serial->parent->ref, hso_serial_ref_free);
35372 }
35373 } else {
35374@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35375
35376 /* reset the rts and dtr */
35377 /* do the actual close */
35378- serial->open_count--;
35379+ local_dec(&serial->open_count);
35380
35381- if (serial->open_count <= 0) {
35382- serial->open_count = 0;
35383+ if (local_read(&serial->open_count) <= 0) {
35384+ local_set(&serial->open_count, 0);
35385 spin_lock_irq(&serial->serial_lock);
35386 if (serial->tty == tty) {
35387 serial->tty->driver_data = NULL;
35388@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35389
35390 /* the actual setup */
35391 spin_lock_irqsave(&serial->serial_lock, flags);
35392- if (serial->open_count)
35393+ if (local_read(&serial->open_count))
35394 _hso_serial_set_termios(tty, old);
35395 else
35396 tty->termios = old;
35397@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
35398 D1("Pending read interrupt on port %d\n", i);
35399 spin_lock(&serial->serial_lock);
35400 if (serial->rx_state == RX_IDLE &&
35401- serial->open_count > 0) {
35402+ local_read(&serial->open_count) > 0) {
35403 /* Setup and send a ctrl req read on
35404 * port i */
35405 if (!serial->rx_urb_filled[0]) {
35406@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
35407 /* Start all serial ports */
35408 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35409 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35410- if (dev2ser(serial_table[i])->open_count) {
35411+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
35412 result =
35413 hso_start_serial_device(serial_table[i], GFP_NOIO);
35414 hso_kick_transmit(dev2ser(serial_table[i]));
35415diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35416index efc0111..79c8f5b 100644
35417--- a/drivers/net/wireless/ath/ath.h
35418+++ b/drivers/net/wireless/ath/ath.h
35419@@ -119,6 +119,7 @@ struct ath_ops {
35420 void (*write_flush) (void *);
35421 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35422 };
35423+typedef struct ath_ops __no_const ath_ops_no_const;
35424
35425 struct ath_common;
35426 struct ath_bus_ops;
35427diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
35428index 8c5ce8b..abf101b 100644
35429--- a/drivers/net/wireless/ath/ath5k/debug.c
35430+++ b/drivers/net/wireless/ath/ath5k/debug.c
35431@@ -343,6 +343,9 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
35432
35433 static ssize_t write_file_debug(struct file *file,
35434 const char __user *userbuf,
35435+ size_t count, loff_t *ppos) __size_overflow(3);
35436+static ssize_t write_file_debug(struct file *file,
35437+ const char __user *userbuf,
35438 size_t count, loff_t *ppos)
35439 {
35440 struct ath5k_hw *ah = file->private_data;
35441diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35442index 7b6417b..ab5db98 100644
35443--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35444+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35445@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35446 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35447 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35448
35449- ACCESS_ONCE(ads->ds_link) = i->link;
35450- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35451+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
35452+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35453
35454 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35455 ctl6 = SM(i->keytype, AR_EncrType);
35456@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35457
35458 if ((i->is_first || i->is_last) &&
35459 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35460- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35461+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35462 | set11nTries(i->rates, 1)
35463 | set11nTries(i->rates, 2)
35464 | set11nTries(i->rates, 3)
35465 | (i->dur_update ? AR_DurUpdateEna : 0)
35466 | SM(0, AR_BurstDur);
35467
35468- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35469+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35470 | set11nRate(i->rates, 1)
35471 | set11nRate(i->rates, 2)
35472 | set11nRate(i->rates, 3);
35473 } else {
35474- ACCESS_ONCE(ads->ds_ctl2) = 0;
35475- ACCESS_ONCE(ads->ds_ctl3) = 0;
35476+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35477+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35478 }
35479
35480 if (!i->is_first) {
35481- ACCESS_ONCE(ads->ds_ctl0) = 0;
35482- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35483- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35484+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35485+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35486+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35487 return;
35488 }
35489
35490@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35491 break;
35492 }
35493
35494- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35495+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35496 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35497 | SM(i->txpower, AR_XmitPower)
35498 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35499@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35500 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35501 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35502
35503- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35504- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35505+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35506+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35507
35508 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35509 return;
35510
35511- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35512+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35513 | set11nPktDurRTSCTS(i->rates, 1);
35514
35515- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35516+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35517 | set11nPktDurRTSCTS(i->rates, 3);
35518
35519- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35520+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35521 | set11nRateFlags(i->rates, 1)
35522 | set11nRateFlags(i->rates, 2)
35523 | set11nRateFlags(i->rates, 3)
35524diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35525index 09b8c9d..905339e 100644
35526--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35527+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35528@@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35529 (i->qcu << AR_TxQcuNum_S) | 0x17;
35530
35531 checksum += val;
35532- ACCESS_ONCE(ads->info) = val;
35533+ ACCESS_ONCE_RW(ads->info) = val;
35534
35535 checksum += i->link;
35536- ACCESS_ONCE(ads->link) = i->link;
35537+ ACCESS_ONCE_RW(ads->link) = i->link;
35538
35539 checksum += i->buf_addr[0];
35540- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35541+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35542 checksum += i->buf_addr[1];
35543- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35544+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35545 checksum += i->buf_addr[2];
35546- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35547+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35548 checksum += i->buf_addr[3];
35549- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35550+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35551
35552 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35553- ACCESS_ONCE(ads->ctl3) = val;
35554+ ACCESS_ONCE_RW(ads->ctl3) = val;
35555 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35556- ACCESS_ONCE(ads->ctl5) = val;
35557+ ACCESS_ONCE_RW(ads->ctl5) = val;
35558 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35559- ACCESS_ONCE(ads->ctl7) = val;
35560+ ACCESS_ONCE_RW(ads->ctl7) = val;
35561 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35562- ACCESS_ONCE(ads->ctl9) = val;
35563+ ACCESS_ONCE_RW(ads->ctl9) = val;
35564
35565 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35566- ACCESS_ONCE(ads->ctl10) = checksum;
35567+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
35568
35569 if (i->is_first || i->is_last) {
35570- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35571+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35572 | set11nTries(i->rates, 1)
35573 | set11nTries(i->rates, 2)
35574 | set11nTries(i->rates, 3)
35575 | (i->dur_update ? AR_DurUpdateEna : 0)
35576 | SM(0, AR_BurstDur);
35577
35578- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35579+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35580 | set11nRate(i->rates, 1)
35581 | set11nRate(i->rates, 2)
35582 | set11nRate(i->rates, 3);
35583 } else {
35584- ACCESS_ONCE(ads->ctl13) = 0;
35585- ACCESS_ONCE(ads->ctl14) = 0;
35586+ ACCESS_ONCE_RW(ads->ctl13) = 0;
35587+ ACCESS_ONCE_RW(ads->ctl14) = 0;
35588 }
35589
35590 ads->ctl20 = 0;
35591@@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35592
35593 ctl17 = SM(i->keytype, AR_EncrType);
35594 if (!i->is_first) {
35595- ACCESS_ONCE(ads->ctl11) = 0;
35596- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35597- ACCESS_ONCE(ads->ctl15) = 0;
35598- ACCESS_ONCE(ads->ctl16) = 0;
35599- ACCESS_ONCE(ads->ctl17) = ctl17;
35600- ACCESS_ONCE(ads->ctl18) = 0;
35601- ACCESS_ONCE(ads->ctl19) = 0;
35602+ ACCESS_ONCE_RW(ads->ctl11) = 0;
35603+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35604+ ACCESS_ONCE_RW(ads->ctl15) = 0;
35605+ ACCESS_ONCE_RW(ads->ctl16) = 0;
35606+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35607+ ACCESS_ONCE_RW(ads->ctl18) = 0;
35608+ ACCESS_ONCE_RW(ads->ctl19) = 0;
35609 return;
35610 }
35611
35612- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35613+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35614 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35615 | SM(i->txpower, AR_XmitPower)
35616 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35617@@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35618 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35619 ctl12 |= SM(val, AR_PAPRDChainMask);
35620
35621- ACCESS_ONCE(ads->ctl12) = ctl12;
35622- ACCESS_ONCE(ads->ctl17) = ctl17;
35623+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35624+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35625
35626- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35627+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35628 | set11nPktDurRTSCTS(i->rates, 1);
35629
35630- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35631+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35632 | set11nPktDurRTSCTS(i->rates, 3);
35633
35634- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35635+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35636 | set11nRateFlags(i->rates, 1)
35637 | set11nRateFlags(i->rates, 2)
35638 | set11nRateFlags(i->rates, 3)
35639 | SM(i->rtscts_rate, AR_RTSCTSRate);
35640
35641- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35642+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35643 }
35644
35645 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
35646diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
35647index 68d972b..1d9205b 100644
35648--- a/drivers/net/wireless/ath/ath9k/debug.c
35649+++ b/drivers/net/wireless/ath/ath9k/debug.c
35650@@ -60,6 +60,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
35651 }
35652
35653 static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
35654+ size_t count, loff_t *ppos) __size_overflow(3);
35655+static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
35656 size_t count, loff_t *ppos)
35657 {
35658 struct ath_softc *sc = file->private_data;
35659diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
35660index d3ff33c..c98bcda 100644
35661--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
35662+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
35663@@ -464,6 +464,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
35664 }
35665
35666 static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
35667+ size_t count, loff_t *ppos) __size_overflow(3);
35668+static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
35669 size_t count, loff_t *ppos)
35670 {
35671 struct ath9k_htc_priv *priv = file->private_data;
35672diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
35673index c8261d4..8d88929 100644
35674--- a/drivers/net/wireless/ath/ath9k/hw.h
35675+++ b/drivers/net/wireless/ath/ath9k/hw.h
35676@@ -773,7 +773,7 @@ struct ath_hw_private_ops {
35677
35678 /* ANI */
35679 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35680-};
35681+} __no_const;
35682
35683 /**
35684 * struct ath_hw_ops - callbacks used by hardware code and driver code
35685@@ -803,7 +803,7 @@ struct ath_hw_ops {
35686 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35687 struct ath_hw_antcomb_conf *antconf);
35688
35689-};
35690+} __no_const;
35691
35692 struct ath_nf_limits {
35693 s16 max;
35694@@ -823,7 +823,7 @@ enum ath_cal_list {
35695 #define AH_FASTCC 0x4
35696
35697 struct ath_hw {
35698- struct ath_ops reg_ops;
35699+ ath_ops_no_const reg_ops;
35700
35701 struct ieee80211_hw *hw;
35702 struct ath_common common;
35703diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35704index af00e2c..ab04d34 100644
35705--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35706+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35707@@ -545,7 +545,7 @@ struct phy_func_ptr {
35708 void (*carrsuppr)(struct brcms_phy *);
35709 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35710 void (*detach)(struct brcms_phy *);
35711-};
35712+} __no_const;
35713
35714 struct brcms_phy {
35715 struct brcms_phy_pub pubpi_ro;
35716diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
35717index a7dfba8..e28eacd 100644
35718--- a/drivers/net/wireless/iwlegacy/3945-mac.c
35719+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
35720@@ -3647,7 +3647,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
35721 */
35722 if (il3945_mod_params.disable_hw_scan) {
35723 D_INFO("Disabling hw_scan\n");
35724- il3945_hw_ops.hw_scan = NULL;
35725+ pax_open_kernel();
35726+ *(void **)&il3945_hw_ops.hw_scan = NULL;
35727+ pax_close_kernel();
35728 }
35729
35730 D_INFO("*** LOAD DRIVER ***\n");
35731diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
35732index f8fc239..8cade22 100644
35733--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
35734+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
35735@@ -86,8 +86,8 @@ do { \
35736 } while (0)
35737
35738 #else
35739-#define IWL_DEBUG(m, level, fmt, args...)
35740-#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
35741+#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
35742+#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
35743 #define iwl_print_hex_dump(m, level, p, len)
35744 #define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
35745 do { \
35746diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
35747index 4b9e730..7603659 100644
35748--- a/drivers/net/wireless/mac80211_hwsim.c
35749+++ b/drivers/net/wireless/mac80211_hwsim.c
35750@@ -1677,9 +1677,11 @@ static int __init init_mac80211_hwsim(void)
35751 return -EINVAL;
35752
35753 if (fake_hw_scan) {
35754- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35755- mac80211_hwsim_ops.sw_scan_start = NULL;
35756- mac80211_hwsim_ops.sw_scan_complete = NULL;
35757+ pax_open_kernel();
35758+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35759+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35760+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35761+ pax_close_kernel();
35762 }
35763
35764 spin_lock_init(&hwsim_radio_lock);
35765diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35766index 3186aa4..b35b09f 100644
35767--- a/drivers/net/wireless/mwifiex/main.h
35768+++ b/drivers/net/wireless/mwifiex/main.h
35769@@ -536,7 +536,7 @@ struct mwifiex_if_ops {
35770 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
35771 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
35772 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
35773-};
35774+} __no_const;
35775
35776 struct mwifiex_adapter {
35777 u8 iface_type;
35778diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35779index a330c69..a81540f 100644
35780--- a/drivers/net/wireless/rndis_wlan.c
35781+++ b/drivers/net/wireless/rndis_wlan.c
35782@@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35783
35784 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35785
35786- if (rts_threshold < 0 || rts_threshold > 2347)
35787+ if (rts_threshold > 2347)
35788 rts_threshold = 2347;
35789
35790 tmp = cpu_to_le32(rts_threshold);
35791diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
35792index a77f1bb..c608b2b 100644
35793--- a/drivers/net/wireless/wl1251/wl1251.h
35794+++ b/drivers/net/wireless/wl1251/wl1251.h
35795@@ -266,7 +266,7 @@ struct wl1251_if_operations {
35796 void (*reset)(struct wl1251 *wl);
35797 void (*enable_irq)(struct wl1251 *wl);
35798 void (*disable_irq)(struct wl1251 *wl);
35799-};
35800+} __no_const;
35801
35802 struct wl1251 {
35803 struct ieee80211_hw *hw;
35804diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35805index f34b5b2..b5abb9f 100644
35806--- a/drivers/oprofile/buffer_sync.c
35807+++ b/drivers/oprofile/buffer_sync.c
35808@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35809 if (cookie == NO_COOKIE)
35810 offset = pc;
35811 if (cookie == INVALID_COOKIE) {
35812- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35813+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35814 offset = pc;
35815 }
35816 if (cookie != last_cookie) {
35817@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35818 /* add userspace sample */
35819
35820 if (!mm) {
35821- atomic_inc(&oprofile_stats.sample_lost_no_mm);
35822+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35823 return 0;
35824 }
35825
35826 cookie = lookup_dcookie(mm, s->eip, &offset);
35827
35828 if (cookie == INVALID_COOKIE) {
35829- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35830+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35831 return 0;
35832 }
35833
35834@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35835 /* ignore backtraces if failed to add a sample */
35836 if (state == sb_bt_start) {
35837 state = sb_bt_ignore;
35838- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35839+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35840 }
35841 }
35842 release_mm(mm);
35843diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35844index c0cc4e7..44d4e54 100644
35845--- a/drivers/oprofile/event_buffer.c
35846+++ b/drivers/oprofile/event_buffer.c
35847@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35848 }
35849
35850 if (buffer_pos == buffer_size) {
35851- atomic_inc(&oprofile_stats.event_lost_overflow);
35852+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35853 return;
35854 }
35855
35856diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35857index ed2c3ec..deda85a 100644
35858--- a/drivers/oprofile/oprof.c
35859+++ b/drivers/oprofile/oprof.c
35860@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35861 if (oprofile_ops.switch_events())
35862 return;
35863
35864- atomic_inc(&oprofile_stats.multiplex_counter);
35865+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35866 start_switch_worker();
35867 }
35868
35869diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
35870index 84a208d..f07d177 100644
35871--- a/drivers/oprofile/oprofile_files.c
35872+++ b/drivers/oprofile/oprofile_files.c
35873@@ -36,6 +36,8 @@ static ssize_t timeout_read(struct file *file, char __user *buf,
35874
35875
35876 static ssize_t timeout_write(struct file *file, char const __user *buf,
35877+ size_t count, loff_t *offset) __size_overflow(3);
35878+static ssize_t timeout_write(struct file *file, char const __user *buf,
35879 size_t count, loff_t *offset)
35880 {
35881 unsigned long val;
35882@@ -72,6 +74,7 @@ static ssize_t depth_read(struct file *file, char __user *buf, size_t count, lof
35883 }
35884
35885
35886+static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
35887 static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
35888 {
35889 unsigned long val;
35890@@ -126,12 +129,14 @@ static const struct file_operations cpu_type_fops = {
35891 };
35892
35893
35894+static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset) __size_overflow(3);
35895 static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
35896 {
35897 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
35898 }
35899
35900
35901+static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
35902 static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
35903 {
35904 unsigned long val;
35905diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35906index 917d28e..d62d981 100644
35907--- a/drivers/oprofile/oprofile_stats.c
35908+++ b/drivers/oprofile/oprofile_stats.c
35909@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35910 cpu_buf->sample_invalid_eip = 0;
35911 }
35912
35913- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35914- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35915- atomic_set(&oprofile_stats.event_lost_overflow, 0);
35916- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35917- atomic_set(&oprofile_stats.multiplex_counter, 0);
35918+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35919+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35920+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35921+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35922+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35923 }
35924
35925
35926diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35927index 38b6fc0..b5cbfce 100644
35928--- a/drivers/oprofile/oprofile_stats.h
35929+++ b/drivers/oprofile/oprofile_stats.h
35930@@ -13,11 +13,11 @@
35931 #include <linux/atomic.h>
35932
35933 struct oprofile_stat_struct {
35934- atomic_t sample_lost_no_mm;
35935- atomic_t sample_lost_no_mapping;
35936- atomic_t bt_lost_no_mapping;
35937- atomic_t event_lost_overflow;
35938- atomic_t multiplex_counter;
35939+ atomic_unchecked_t sample_lost_no_mm;
35940+ atomic_unchecked_t sample_lost_no_mapping;
35941+ atomic_unchecked_t bt_lost_no_mapping;
35942+ atomic_unchecked_t event_lost_overflow;
35943+ atomic_unchecked_t multiplex_counter;
35944 };
35945
35946 extern struct oprofile_stat_struct oprofile_stats;
35947diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35948index 2f0aa0f..d5246c3 100644
35949--- a/drivers/oprofile/oprofilefs.c
35950+++ b/drivers/oprofile/oprofilefs.c
35951@@ -97,6 +97,7 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count
35952 }
35953
35954
35955+static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
35956 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
35957 {
35958 unsigned long value;
35959@@ -193,7 +194,7 @@ static const struct file_operations atomic_ro_fops = {
35960
35961
35962 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35963- char const *name, atomic_t *val)
35964+ char const *name, atomic_unchecked_t *val)
35965 {
35966 return __oprofilefs_create_file(sb, root, name,
35967 &atomic_ro_fops, 0444, val);
35968diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35969index 3f56bc0..707d642 100644
35970--- a/drivers/parport/procfs.c
35971+++ b/drivers/parport/procfs.c
35972@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35973
35974 *ppos += len;
35975
35976- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35977+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35978 }
35979
35980 #ifdef CONFIG_PARPORT_1284
35981@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35982
35983 *ppos += len;
35984
35985- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35986+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35987 }
35988 #endif /* IEEE1284.3 support. */
35989
35990diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35991index 9fff878..ad0ad53 100644
35992--- a/drivers/pci/hotplug/cpci_hotplug.h
35993+++ b/drivers/pci/hotplug/cpci_hotplug.h
35994@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35995 int (*hardware_test) (struct slot* slot, u32 value);
35996 u8 (*get_power) (struct slot* slot);
35997 int (*set_power) (struct slot* slot, int value);
35998-};
35999+} __no_const;
36000
36001 struct cpci_hp_controller {
36002 unsigned int irq;
36003diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
36004index 76ba8a1..20ca857 100644
36005--- a/drivers/pci/hotplug/cpqphp_nvram.c
36006+++ b/drivers/pci/hotplug/cpqphp_nvram.c
36007@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
36008
36009 void compaq_nvram_init (void __iomem *rom_start)
36010 {
36011+
36012+#ifndef CONFIG_PAX_KERNEXEC
36013 if (rom_start) {
36014 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
36015 }
36016+#endif
36017+
36018 dbg("int15 entry = %p\n", compaq_int15_entry_point);
36019
36020 /* initialize our int15 lock */
36021diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
36022index 2275162..95f1a92 100644
36023--- a/drivers/pci/pcie/aspm.c
36024+++ b/drivers/pci/pcie/aspm.c
36025@@ -27,9 +27,9 @@
36026 #define MODULE_PARAM_PREFIX "pcie_aspm."
36027
36028 /* Note: those are not register definitions */
36029-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
36030-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
36031-#define ASPM_STATE_L1 (4) /* L1 state */
36032+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
36033+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
36034+#define ASPM_STATE_L1 (4U) /* L1 state */
36035 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
36036 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
36037
36038diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
36039index 71eac9c..2de27ef 100644
36040--- a/drivers/pci/probe.c
36041+++ b/drivers/pci/probe.c
36042@@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
36043 u32 l, sz, mask;
36044 u16 orig_cmd;
36045
36046- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
36047+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
36048
36049 if (!dev->mmio_always_on) {
36050 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
36051diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
36052index 27911b5..5b6db88 100644
36053--- a/drivers/pci/proc.c
36054+++ b/drivers/pci/proc.c
36055@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
36056 static int __init pci_proc_init(void)
36057 {
36058 struct pci_dev *dev = NULL;
36059+
36060+#ifdef CONFIG_GRKERNSEC_PROC_ADD
36061+#ifdef CONFIG_GRKERNSEC_PROC_USER
36062+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36063+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36064+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36065+#endif
36066+#else
36067 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36068+#endif
36069 proc_create("devices", 0, proc_bus_pci_dir,
36070 &proc_bus_pci_dev_operations);
36071 proc_initialized = 1;
36072diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
36073index 6f966d6..68e18ed 100644
36074--- a/drivers/platform/x86/asus_acpi.c
36075+++ b/drivers/platform/x86/asus_acpi.c
36076@@ -887,6 +887,8 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
36077 }
36078
36079 static ssize_t lcd_proc_write(struct file *file, const char __user *buffer,
36080+ size_t count, loff_t *pos) __size_overflow(3);
36081+static ssize_t lcd_proc_write(struct file *file, const char __user *buffer,
36082 size_t count, loff_t *pos)
36083 {
36084 int rv, value;
36085diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
36086index ea0c607..58c4628 100644
36087--- a/drivers/platform/x86/thinkpad_acpi.c
36088+++ b/drivers/platform/x86/thinkpad_acpi.c
36089@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
36090 return 0;
36091 }
36092
36093-void static hotkey_mask_warn_incomplete_mask(void)
36094+static void hotkey_mask_warn_incomplete_mask(void)
36095 {
36096 /* log only what the user can fix... */
36097 const u32 wantedmask = hotkey_driver_mask &
36098@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
36099 }
36100 }
36101
36102-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36103- struct tp_nvram_state *newn,
36104- const u32 event_mask)
36105-{
36106-
36107 #define TPACPI_COMPARE_KEY(__scancode, __member) \
36108 do { \
36109 if ((event_mask & (1 << __scancode)) && \
36110@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36111 tpacpi_hotkey_send_key(__scancode); \
36112 } while (0)
36113
36114- void issue_volchange(const unsigned int oldvol,
36115- const unsigned int newvol)
36116- {
36117- unsigned int i = oldvol;
36118+static void issue_volchange(const unsigned int oldvol,
36119+ const unsigned int newvol,
36120+ const u32 event_mask)
36121+{
36122+ unsigned int i = oldvol;
36123
36124- while (i > newvol) {
36125- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36126- i--;
36127- }
36128- while (i < newvol) {
36129- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36130- i++;
36131- }
36132+ while (i > newvol) {
36133+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36134+ i--;
36135 }
36136+ while (i < newvol) {
36137+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36138+ i++;
36139+ }
36140+}
36141
36142- void issue_brightnesschange(const unsigned int oldbrt,
36143- const unsigned int newbrt)
36144- {
36145- unsigned int i = oldbrt;
36146+static void issue_brightnesschange(const unsigned int oldbrt,
36147+ const unsigned int newbrt,
36148+ const u32 event_mask)
36149+{
36150+ unsigned int i = oldbrt;
36151
36152- while (i > newbrt) {
36153- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36154- i--;
36155- }
36156- while (i < newbrt) {
36157- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36158- i++;
36159- }
36160+ while (i > newbrt) {
36161+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36162+ i--;
36163+ }
36164+ while (i < newbrt) {
36165+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36166+ i++;
36167 }
36168+}
36169
36170+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36171+ struct tp_nvram_state *newn,
36172+ const u32 event_mask)
36173+{
36174 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
36175 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
36176 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
36177@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36178 oldn->volume_level != newn->volume_level) {
36179 /* recently muted, or repeated mute keypress, or
36180 * multiple presses ending in mute */
36181- issue_volchange(oldn->volume_level, newn->volume_level);
36182+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36183 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
36184 }
36185 } else {
36186@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36187 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36188 }
36189 if (oldn->volume_level != newn->volume_level) {
36190- issue_volchange(oldn->volume_level, newn->volume_level);
36191+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36192 } else if (oldn->volume_toggle != newn->volume_toggle) {
36193 /* repeated vol up/down keypress at end of scale ? */
36194 if (newn->volume_level == 0)
36195@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36196 /* handle brightness */
36197 if (oldn->brightness_level != newn->brightness_level) {
36198 issue_brightnesschange(oldn->brightness_level,
36199- newn->brightness_level);
36200+ newn->brightness_level,
36201+ event_mask);
36202 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
36203 /* repeated key presses that didn't change state */
36204 if (newn->brightness_level == 0)
36205@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36206 && !tp_features.bright_unkfw)
36207 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36208 }
36209+}
36210
36211 #undef TPACPI_COMPARE_KEY
36212 #undef TPACPI_MAY_SEND_KEY
36213-}
36214
36215 /*
36216 * Polling driver
36217diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
36218index dcdc1f4..85cee16 100644
36219--- a/drivers/platform/x86/toshiba_acpi.c
36220+++ b/drivers/platform/x86/toshiba_acpi.c
36221@@ -517,6 +517,8 @@ static int set_lcd_status(struct backlight_device *bd)
36222 }
36223
36224 static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
36225+ size_t count, loff_t *pos) __size_overflow(3);
36226+static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
36227 size_t count, loff_t *pos)
36228 {
36229 struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
36230diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
36231index b859d16..5cc6b1a 100644
36232--- a/drivers/pnp/pnpbios/bioscalls.c
36233+++ b/drivers/pnp/pnpbios/bioscalls.c
36234@@ -59,7 +59,7 @@ do { \
36235 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36236 } while(0)
36237
36238-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36239+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36240 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36241
36242 /*
36243@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36244
36245 cpu = get_cpu();
36246 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36247+
36248+ pax_open_kernel();
36249 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36250+ pax_close_kernel();
36251
36252 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36253 spin_lock_irqsave(&pnp_bios_lock, flags);
36254@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36255 :"memory");
36256 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36257
36258+ pax_open_kernel();
36259 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36260+ pax_close_kernel();
36261+
36262 put_cpu();
36263
36264 /* If we get here and this is set then the PnP BIOS faulted on us. */
36265@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
36266 return status;
36267 }
36268
36269-void pnpbios_calls_init(union pnp_bios_install_struct *header)
36270+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36271 {
36272 int i;
36273
36274@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36275 pnp_bios_callpoint.offset = header->fields.pm16offset;
36276 pnp_bios_callpoint.segment = PNP_CS16;
36277
36278+ pax_open_kernel();
36279+
36280 for_each_possible_cpu(i) {
36281 struct desc_struct *gdt = get_cpu_gdt_table(i);
36282 if (!gdt)
36283@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36284 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36285 (unsigned long)__va(header->fields.pm16dseg));
36286 }
36287+
36288+ pax_close_kernel();
36289 }
36290diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
36291index b0ecacb..7c9da2e 100644
36292--- a/drivers/pnp/resource.c
36293+++ b/drivers/pnp/resource.c
36294@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
36295 return 1;
36296
36297 /* check if the resource is valid */
36298- if (*irq < 0 || *irq > 15)
36299+ if (*irq > 15)
36300 return 0;
36301
36302 /* check if the resource is reserved */
36303@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
36304 return 1;
36305
36306 /* check if the resource is valid */
36307- if (*dma < 0 || *dma == 4 || *dma > 7)
36308+ if (*dma == 4 || *dma > 7)
36309 return 0;
36310
36311 /* check if the resource is reserved */
36312diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
36313index 1ed6ea0..77c0bd2 100644
36314--- a/drivers/power/bq27x00_battery.c
36315+++ b/drivers/power/bq27x00_battery.c
36316@@ -72,7 +72,7 @@
36317 struct bq27x00_device_info;
36318 struct bq27x00_access_methods {
36319 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
36320-};
36321+} __no_const;
36322
36323 enum bq27x00_chip { BQ27000, BQ27500 };
36324
36325diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
36326index a838e66..a9e1665 100644
36327--- a/drivers/regulator/max8660.c
36328+++ b/drivers/regulator/max8660.c
36329@@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
36330 max8660->shadow_regs[MAX8660_OVER1] = 5;
36331 } else {
36332 /* Otherwise devices can be toggled via software */
36333- max8660_dcdc_ops.enable = max8660_dcdc_enable;
36334- max8660_dcdc_ops.disable = max8660_dcdc_disable;
36335+ pax_open_kernel();
36336+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
36337+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
36338+ pax_close_kernel();
36339 }
36340
36341 /*
36342diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
36343index e8cfc99..072aee2 100644
36344--- a/drivers/regulator/mc13892-regulator.c
36345+++ b/drivers/regulator/mc13892-regulator.c
36346@@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
36347 }
36348 mc13xxx_unlock(mc13892);
36349
36350- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36351+ pax_open_kernel();
36352+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36353 = mc13892_vcam_set_mode;
36354- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36355+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36356 = mc13892_vcam_get_mode;
36357+ pax_close_kernel();
36358
36359 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
36360 ARRAY_SIZE(mc13892_regulators));
36361diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
36362index cace6d3..f623fda 100644
36363--- a/drivers/rtc/rtc-dev.c
36364+++ b/drivers/rtc/rtc-dev.c
36365@@ -14,6 +14,7 @@
36366 #include <linux/module.h>
36367 #include <linux/rtc.h>
36368 #include <linux/sched.h>
36369+#include <linux/grsecurity.h>
36370 #include "rtc-core.h"
36371
36372 static dev_t rtc_devt;
36373@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
36374 if (copy_from_user(&tm, uarg, sizeof(tm)))
36375 return -EFAULT;
36376
36377+ gr_log_timechange();
36378+
36379 return rtc_set_time(rtc, &tm);
36380
36381 case RTC_PIE_ON:
36382diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
36383index ffb5878..e6d785c 100644
36384--- a/drivers/scsi/aacraid/aacraid.h
36385+++ b/drivers/scsi/aacraid/aacraid.h
36386@@ -492,7 +492,7 @@ struct adapter_ops
36387 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36388 /* Administrative operations */
36389 int (*adapter_comm)(struct aac_dev * dev, int comm);
36390-};
36391+} __no_const;
36392
36393 /*
36394 * Define which interrupt handler needs to be installed
36395diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
36396index 705e13e..91c873c 100644
36397--- a/drivers/scsi/aacraid/linit.c
36398+++ b/drivers/scsi/aacraid/linit.c
36399@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
36400 #elif defined(__devinitconst)
36401 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36402 #else
36403-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
36404+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36405 #endif
36406 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
36407 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
36408diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
36409index d5ff142..49c0ebb 100644
36410--- a/drivers/scsi/aic94xx/aic94xx_init.c
36411+++ b/drivers/scsi/aic94xx/aic94xx_init.c
36412@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
36413 .lldd_control_phy = asd_control_phy,
36414 };
36415
36416-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
36417+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
36418 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
36419 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
36420 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
36421diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
36422index a796de9..1ef20e1 100644
36423--- a/drivers/scsi/bfa/bfa.h
36424+++ b/drivers/scsi/bfa/bfa.h
36425@@ -196,7 +196,7 @@ struct bfa_hwif_s {
36426 u32 *end);
36427 int cpe_vec_q0;
36428 int rme_vec_q0;
36429-};
36430+} __no_const;
36431 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36432
36433 struct bfa_faa_cbfn_s {
36434diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
36435index f0f80e2..8ec946b 100644
36436--- a/drivers/scsi/bfa/bfa_fcpim.c
36437+++ b/drivers/scsi/bfa/bfa_fcpim.c
36438@@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
36439
36440 bfa_iotag_attach(fcp);
36441
36442- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36443+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36444 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36445 (fcp->num_itns * sizeof(struct bfa_itn_s));
36446 memset(fcp->itn_arr, 0,
36447@@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36448 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36449 {
36450 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36451- struct bfa_itn_s *itn;
36452+ bfa_itn_s_no_const *itn;
36453
36454 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36455 itn->isr = isr;
36456diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36457index 36f26da..38a34a8 100644
36458--- a/drivers/scsi/bfa/bfa_fcpim.h
36459+++ b/drivers/scsi/bfa/bfa_fcpim.h
36460@@ -37,6 +37,7 @@ struct bfa_iotag_s {
36461 struct bfa_itn_s {
36462 bfa_isr_func_t isr;
36463 };
36464+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36465
36466 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36467 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36468@@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36469 struct list_head iotag_tio_free_q; /* free IO resources */
36470 struct list_head iotag_unused_q; /* unused IO resources*/
36471 struct bfa_iotag_s *iotag_arr;
36472- struct bfa_itn_s *itn_arr;
36473+ bfa_itn_s_no_const *itn_arr;
36474 int num_ioim_reqs;
36475 int num_fwtio_reqs;
36476 int num_itns;
36477diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36478index 546d46b..642fa5b 100644
36479--- a/drivers/scsi/bfa/bfa_ioc.h
36480+++ b/drivers/scsi/bfa/bfa_ioc.h
36481@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36482 bfa_ioc_disable_cbfn_t disable_cbfn;
36483 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36484 bfa_ioc_reset_cbfn_t reset_cbfn;
36485-};
36486+} __no_const;
36487
36488 /*
36489 * IOC event notification mechanism.
36490@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36491 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36492 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36493 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36494-};
36495+} __no_const;
36496
36497 /*
36498 * Queue element to wait for room in request queue. FIFO order is
36499diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36500index 351dc0b..951dc32 100644
36501--- a/drivers/scsi/hosts.c
36502+++ b/drivers/scsi/hosts.c
36503@@ -42,7 +42,7 @@
36504 #include "scsi_logging.h"
36505
36506
36507-static atomic_t scsi_host_next_hn; /* host_no for next new host */
36508+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36509
36510
36511 static void scsi_host_cls_release(struct device *dev)
36512@@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36513 * subtract one because we increment first then return, but we need to
36514 * know what the next host number was before increment
36515 */
36516- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36517+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36518 shost->dma_channel = 0xff;
36519
36520 /* These three are default values which can be overridden */
36521diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36522index b96962c..0c82ec2 100644
36523--- a/drivers/scsi/hpsa.c
36524+++ b/drivers/scsi/hpsa.c
36525@@ -507,7 +507,7 @@ static inline u32 next_command(struct ctlr_info *h)
36526 u32 a;
36527
36528 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36529- return h->access.command_completed(h);
36530+ return h->access->command_completed(h);
36531
36532 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36533 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36534@@ -2991,7 +2991,7 @@ static void start_io(struct ctlr_info *h)
36535 while (!list_empty(&h->reqQ)) {
36536 c = list_entry(h->reqQ.next, struct CommandList, list);
36537 /* can't do anything if fifo is full */
36538- if ((h->access.fifo_full(h))) {
36539+ if ((h->access->fifo_full(h))) {
36540 dev_warn(&h->pdev->dev, "fifo full\n");
36541 break;
36542 }
36543@@ -3001,7 +3001,7 @@ static void start_io(struct ctlr_info *h)
36544 h->Qdepth--;
36545
36546 /* Tell the controller execute command */
36547- h->access.submit_command(h, c);
36548+ h->access->submit_command(h, c);
36549
36550 /* Put job onto the completed Q */
36551 addQ(&h->cmpQ, c);
36552@@ -3010,17 +3010,17 @@ static void start_io(struct ctlr_info *h)
36553
36554 static inline unsigned long get_next_completion(struct ctlr_info *h)
36555 {
36556- return h->access.command_completed(h);
36557+ return h->access->command_completed(h);
36558 }
36559
36560 static inline bool interrupt_pending(struct ctlr_info *h)
36561 {
36562- return h->access.intr_pending(h);
36563+ return h->access->intr_pending(h);
36564 }
36565
36566 static inline long interrupt_not_for_us(struct ctlr_info *h)
36567 {
36568- return (h->access.intr_pending(h) == 0) ||
36569+ return (h->access->intr_pending(h) == 0) ||
36570 (h->interrupts_enabled == 0);
36571 }
36572
36573@@ -3919,7 +3919,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36574 if (prod_index < 0)
36575 return -ENODEV;
36576 h->product_name = products[prod_index].product_name;
36577- h->access = *(products[prod_index].access);
36578+ h->access = products[prod_index].access;
36579
36580 if (hpsa_board_disabled(h->pdev)) {
36581 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36582@@ -4164,7 +4164,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
36583
36584 assert_spin_locked(&lockup_detector_lock);
36585 remove_ctlr_from_lockup_detector_list(h);
36586- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36587+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36588 spin_lock_irqsave(&h->lock, flags);
36589 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36590 spin_unlock_irqrestore(&h->lock, flags);
36591@@ -4344,7 +4344,7 @@ reinit_after_soft_reset:
36592 }
36593
36594 /* make sure the board interrupts are off */
36595- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36596+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36597
36598 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36599 goto clean2;
36600@@ -4378,7 +4378,7 @@ reinit_after_soft_reset:
36601 * fake ones to scoop up any residual completions.
36602 */
36603 spin_lock_irqsave(&h->lock, flags);
36604- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36605+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36606 spin_unlock_irqrestore(&h->lock, flags);
36607 free_irq(h->intr[h->intr_mode], h);
36608 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36609@@ -4397,9 +4397,9 @@ reinit_after_soft_reset:
36610 dev_info(&h->pdev->dev, "Board READY.\n");
36611 dev_info(&h->pdev->dev,
36612 "Waiting for stale completions to drain.\n");
36613- h->access.set_intr_mask(h, HPSA_INTR_ON);
36614+ h->access->set_intr_mask(h, HPSA_INTR_ON);
36615 msleep(10000);
36616- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36617+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36618
36619 rc = controller_reset_failed(h->cfgtable);
36620 if (rc)
36621@@ -4420,7 +4420,7 @@ reinit_after_soft_reset:
36622 }
36623
36624 /* Turn the interrupts on so we can service requests */
36625- h->access.set_intr_mask(h, HPSA_INTR_ON);
36626+ h->access->set_intr_mask(h, HPSA_INTR_ON);
36627
36628 hpsa_hba_inquiry(h);
36629 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36630@@ -4472,7 +4472,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36631 * To write all data in the battery backed cache to disks
36632 */
36633 hpsa_flush_cache(h);
36634- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36635+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36636 free_irq(h->intr[h->intr_mode], h);
36637 #ifdef CONFIG_PCI_MSI
36638 if (h->msix_vector)
36639@@ -4636,7 +4636,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36640 return;
36641 }
36642 /* Change the access methods to the performant access methods */
36643- h->access = SA5_performant_access;
36644+ h->access = &SA5_performant_access;
36645 h->transMethod = CFGTBL_Trans_Performant;
36646 }
36647
36648diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36649index 91edafb..a9b88ec 100644
36650--- a/drivers/scsi/hpsa.h
36651+++ b/drivers/scsi/hpsa.h
36652@@ -73,7 +73,7 @@ struct ctlr_info {
36653 unsigned int msix_vector;
36654 unsigned int msi_vector;
36655 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36656- struct access_method access;
36657+ struct access_method *access;
36658
36659 /* queue and queue Info */
36660 struct list_head reqQ;
36661diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36662index f2df059..a3a9930 100644
36663--- a/drivers/scsi/ips.h
36664+++ b/drivers/scsi/ips.h
36665@@ -1027,7 +1027,7 @@ typedef struct {
36666 int (*intr)(struct ips_ha *);
36667 void (*enableint)(struct ips_ha *);
36668 uint32_t (*statupd)(struct ips_ha *);
36669-} ips_hw_func_t;
36670+} __no_const ips_hw_func_t;
36671
36672 typedef struct ips_ha {
36673 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36674diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36675index 4d70d96..84d0573 100644
36676--- a/drivers/scsi/libfc/fc_exch.c
36677+++ b/drivers/scsi/libfc/fc_exch.c
36678@@ -105,12 +105,12 @@ struct fc_exch_mgr {
36679 * all together if not used XXX
36680 */
36681 struct {
36682- atomic_t no_free_exch;
36683- atomic_t no_free_exch_xid;
36684- atomic_t xid_not_found;
36685- atomic_t xid_busy;
36686- atomic_t seq_not_found;
36687- atomic_t non_bls_resp;
36688+ atomic_unchecked_t no_free_exch;
36689+ atomic_unchecked_t no_free_exch_xid;
36690+ atomic_unchecked_t xid_not_found;
36691+ atomic_unchecked_t xid_busy;
36692+ atomic_unchecked_t seq_not_found;
36693+ atomic_unchecked_t non_bls_resp;
36694 } stats;
36695 };
36696
36697@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36698 /* allocate memory for exchange */
36699 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36700 if (!ep) {
36701- atomic_inc(&mp->stats.no_free_exch);
36702+ atomic_inc_unchecked(&mp->stats.no_free_exch);
36703 goto out;
36704 }
36705 memset(ep, 0, sizeof(*ep));
36706@@ -780,7 +780,7 @@ out:
36707 return ep;
36708 err:
36709 spin_unlock_bh(&pool->lock);
36710- atomic_inc(&mp->stats.no_free_exch_xid);
36711+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36712 mempool_free(ep, mp->ep_pool);
36713 return NULL;
36714 }
36715@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36716 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36717 ep = fc_exch_find(mp, xid);
36718 if (!ep) {
36719- atomic_inc(&mp->stats.xid_not_found);
36720+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36721 reject = FC_RJT_OX_ID;
36722 goto out;
36723 }
36724@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36725 ep = fc_exch_find(mp, xid);
36726 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36727 if (ep) {
36728- atomic_inc(&mp->stats.xid_busy);
36729+ atomic_inc_unchecked(&mp->stats.xid_busy);
36730 reject = FC_RJT_RX_ID;
36731 goto rel;
36732 }
36733@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36734 }
36735 xid = ep->xid; /* get our XID */
36736 } else if (!ep) {
36737- atomic_inc(&mp->stats.xid_not_found);
36738+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36739 reject = FC_RJT_RX_ID; /* XID not found */
36740 goto out;
36741 }
36742@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36743 } else {
36744 sp = &ep->seq;
36745 if (sp->id != fh->fh_seq_id) {
36746- atomic_inc(&mp->stats.seq_not_found);
36747+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36748 if (f_ctl & FC_FC_END_SEQ) {
36749 /*
36750 * Update sequence_id based on incoming last
36751@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36752
36753 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36754 if (!ep) {
36755- atomic_inc(&mp->stats.xid_not_found);
36756+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36757 goto out;
36758 }
36759 if (ep->esb_stat & ESB_ST_COMPLETE) {
36760- atomic_inc(&mp->stats.xid_not_found);
36761+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36762 goto rel;
36763 }
36764 if (ep->rxid == FC_XID_UNKNOWN)
36765 ep->rxid = ntohs(fh->fh_rx_id);
36766 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36767- atomic_inc(&mp->stats.xid_not_found);
36768+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36769 goto rel;
36770 }
36771 if (ep->did != ntoh24(fh->fh_s_id) &&
36772 ep->did != FC_FID_FLOGI) {
36773- atomic_inc(&mp->stats.xid_not_found);
36774+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36775 goto rel;
36776 }
36777 sof = fr_sof(fp);
36778@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36779 sp->ssb_stat |= SSB_ST_RESP;
36780 sp->id = fh->fh_seq_id;
36781 } else if (sp->id != fh->fh_seq_id) {
36782- atomic_inc(&mp->stats.seq_not_found);
36783+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36784 goto rel;
36785 }
36786
36787@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36788 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36789
36790 if (!sp)
36791- atomic_inc(&mp->stats.xid_not_found);
36792+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36793 else
36794- atomic_inc(&mp->stats.non_bls_resp);
36795+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
36796
36797 fc_frame_free(fp);
36798 }
36799diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36800index db9238f..4378ed2 100644
36801--- a/drivers/scsi/libsas/sas_ata.c
36802+++ b/drivers/scsi/libsas/sas_ata.c
36803@@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
36804 .postreset = ata_std_postreset,
36805 .error_handler = ata_std_error_handler,
36806 .post_internal_cmd = sas_ata_post_internal,
36807- .qc_defer = ata_std_qc_defer,
36808+ .qc_defer = ata_std_qc_defer,
36809 .qc_prep = ata_noop_qc_prep,
36810 .qc_issue = sas_ata_qc_issue,
36811 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36812diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36813index 825f930..ce42672 100644
36814--- a/drivers/scsi/lpfc/lpfc.h
36815+++ b/drivers/scsi/lpfc/lpfc.h
36816@@ -413,7 +413,7 @@ struct lpfc_vport {
36817 struct dentry *debug_nodelist;
36818 struct dentry *vport_debugfs_root;
36819 struct lpfc_debugfs_trc *disc_trc;
36820- atomic_t disc_trc_cnt;
36821+ atomic_unchecked_t disc_trc_cnt;
36822 #endif
36823 uint8_t stat_data_enabled;
36824 uint8_t stat_data_blocked;
36825@@ -821,8 +821,8 @@ struct lpfc_hba {
36826 struct timer_list fabric_block_timer;
36827 unsigned long bit_flags;
36828 #define FABRIC_COMANDS_BLOCKED 0
36829- atomic_t num_rsrc_err;
36830- atomic_t num_cmd_success;
36831+ atomic_unchecked_t num_rsrc_err;
36832+ atomic_unchecked_t num_cmd_success;
36833 unsigned long last_rsrc_error_time;
36834 unsigned long last_ramp_down_time;
36835 unsigned long last_ramp_up_time;
36836@@ -852,7 +852,7 @@ struct lpfc_hba {
36837
36838 struct dentry *debug_slow_ring_trc;
36839 struct lpfc_debugfs_trc *slow_ring_trc;
36840- atomic_t slow_ring_trc_cnt;
36841+ atomic_unchecked_t slow_ring_trc_cnt;
36842 /* iDiag debugfs sub-directory */
36843 struct dentry *idiag_root;
36844 struct dentry *idiag_pci_cfg;
36845diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36846index 3587a3f..d45b81b 100644
36847--- a/drivers/scsi/lpfc/lpfc_debugfs.c
36848+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36849@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36850
36851 #include <linux/debugfs.h>
36852
36853-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36854+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36855 static unsigned long lpfc_debugfs_start_time = 0L;
36856
36857 /* iDiag */
36858@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36859 lpfc_debugfs_enable = 0;
36860
36861 len = 0;
36862- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36863+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36864 (lpfc_debugfs_max_disc_trc - 1);
36865 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36866 dtp = vport->disc_trc + i;
36867@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36868 lpfc_debugfs_enable = 0;
36869
36870 len = 0;
36871- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36872+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36873 (lpfc_debugfs_max_slow_ring_trc - 1);
36874 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36875 dtp = phba->slow_ring_trc + i;
36876@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36877 !vport || !vport->disc_trc)
36878 return;
36879
36880- index = atomic_inc_return(&vport->disc_trc_cnt) &
36881+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36882 (lpfc_debugfs_max_disc_trc - 1);
36883 dtp = vport->disc_trc + index;
36884 dtp->fmt = fmt;
36885 dtp->data1 = data1;
36886 dtp->data2 = data2;
36887 dtp->data3 = data3;
36888- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36889+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36890 dtp->jif = jiffies;
36891 #endif
36892 return;
36893@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36894 !phba || !phba->slow_ring_trc)
36895 return;
36896
36897- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36898+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36899 (lpfc_debugfs_max_slow_ring_trc - 1);
36900 dtp = phba->slow_ring_trc + index;
36901 dtp->fmt = fmt;
36902 dtp->data1 = data1;
36903 dtp->data2 = data2;
36904 dtp->data3 = data3;
36905- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36906+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36907 dtp->jif = jiffies;
36908 #endif
36909 return;
36910@@ -4040,7 +4040,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36911 "slow_ring buffer\n");
36912 goto debug_failed;
36913 }
36914- atomic_set(&phba->slow_ring_trc_cnt, 0);
36915+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36916 memset(phba->slow_ring_trc, 0,
36917 (sizeof(struct lpfc_debugfs_trc) *
36918 lpfc_debugfs_max_slow_ring_trc));
36919@@ -4086,7 +4086,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36920 "buffer\n");
36921 goto debug_failed;
36922 }
36923- atomic_set(&vport->disc_trc_cnt, 0);
36924+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36925
36926 snprintf(name, sizeof(name), "discovery_trace");
36927 vport->debug_disc_trc =
36928diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36929index dfea2da..8e17227 100644
36930--- a/drivers/scsi/lpfc/lpfc_init.c
36931+++ b/drivers/scsi/lpfc/lpfc_init.c
36932@@ -10145,8 +10145,10 @@ lpfc_init(void)
36933 printk(LPFC_COPYRIGHT "\n");
36934
36935 if (lpfc_enable_npiv) {
36936- lpfc_transport_functions.vport_create = lpfc_vport_create;
36937- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36938+ pax_open_kernel();
36939+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36940+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36941+ pax_close_kernel();
36942 }
36943 lpfc_transport_template =
36944 fc_attach_transport(&lpfc_transport_functions);
36945diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36946index c60f5d0..751535c 100644
36947--- a/drivers/scsi/lpfc/lpfc_scsi.c
36948+++ b/drivers/scsi/lpfc/lpfc_scsi.c
36949@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36950 uint32_t evt_posted;
36951
36952 spin_lock_irqsave(&phba->hbalock, flags);
36953- atomic_inc(&phba->num_rsrc_err);
36954+ atomic_inc_unchecked(&phba->num_rsrc_err);
36955 phba->last_rsrc_error_time = jiffies;
36956
36957 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36958@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36959 unsigned long flags;
36960 struct lpfc_hba *phba = vport->phba;
36961 uint32_t evt_posted;
36962- atomic_inc(&phba->num_cmd_success);
36963+ atomic_inc_unchecked(&phba->num_cmd_success);
36964
36965 if (vport->cfg_lun_queue_depth <= queue_depth)
36966 return;
36967@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36968 unsigned long num_rsrc_err, num_cmd_success;
36969 int i;
36970
36971- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36972- num_cmd_success = atomic_read(&phba->num_cmd_success);
36973+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36974+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36975
36976 vports = lpfc_create_vport_work_array(phba);
36977 if (vports != NULL)
36978@@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36979 }
36980 }
36981 lpfc_destroy_vport_work_array(phba, vports);
36982- atomic_set(&phba->num_rsrc_err, 0);
36983- atomic_set(&phba->num_cmd_success, 0);
36984+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36985+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36986 }
36987
36988 /**
36989@@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36990 }
36991 }
36992 lpfc_destroy_vport_work_array(phba, vports);
36993- atomic_set(&phba->num_rsrc_err, 0);
36994- atomic_set(&phba->num_cmd_success, 0);
36995+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36996+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36997 }
36998
36999 /**
37000diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
37001index ea8a0b4..812a124 100644
37002--- a/drivers/scsi/pmcraid.c
37003+++ b/drivers/scsi/pmcraid.c
37004@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
37005 res->scsi_dev = scsi_dev;
37006 scsi_dev->hostdata = res;
37007 res->change_detected = 0;
37008- atomic_set(&res->read_failures, 0);
37009- atomic_set(&res->write_failures, 0);
37010+ atomic_set_unchecked(&res->read_failures, 0);
37011+ atomic_set_unchecked(&res->write_failures, 0);
37012 rc = 0;
37013 }
37014 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37015@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
37016
37017 /* If this was a SCSI read/write command keep count of errors */
37018 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37019- atomic_inc(&res->read_failures);
37020+ atomic_inc_unchecked(&res->read_failures);
37021 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37022- atomic_inc(&res->write_failures);
37023+ atomic_inc_unchecked(&res->write_failures);
37024
37025 if (!RES_IS_GSCSI(res->cfg_entry) &&
37026 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37027@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
37028 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37029 * hrrq_id assigned here in queuecommand
37030 */
37031- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37032+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37033 pinstance->num_hrrq;
37034 cmd->cmd_done = pmcraid_io_done;
37035
37036@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
37037 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37038 * hrrq_id assigned here in queuecommand
37039 */
37040- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37041+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37042 pinstance->num_hrrq;
37043
37044 if (request_size) {
37045@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
37046
37047 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37048 /* add resources only after host is added into system */
37049- if (!atomic_read(&pinstance->expose_resources))
37050+ if (!atomic_read_unchecked(&pinstance->expose_resources))
37051 return;
37052
37053 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
37054@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
37055 init_waitqueue_head(&pinstance->reset_wait_q);
37056
37057 atomic_set(&pinstance->outstanding_cmds, 0);
37058- atomic_set(&pinstance->last_message_id, 0);
37059- atomic_set(&pinstance->expose_resources, 0);
37060+ atomic_set_unchecked(&pinstance->last_message_id, 0);
37061+ atomic_set_unchecked(&pinstance->expose_resources, 0);
37062
37063 INIT_LIST_HEAD(&pinstance->free_res_q);
37064 INIT_LIST_HEAD(&pinstance->used_res_q);
37065@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
37066 /* Schedule worker thread to handle CCN and take care of adding and
37067 * removing devices to OS
37068 */
37069- atomic_set(&pinstance->expose_resources, 1);
37070+ atomic_set_unchecked(&pinstance->expose_resources, 1);
37071 schedule_work(&pinstance->worker_q);
37072 return rc;
37073
37074diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
37075index ca496c7..9c791d5 100644
37076--- a/drivers/scsi/pmcraid.h
37077+++ b/drivers/scsi/pmcraid.h
37078@@ -748,7 +748,7 @@ struct pmcraid_instance {
37079 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
37080
37081 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
37082- atomic_t last_message_id;
37083+ atomic_unchecked_t last_message_id;
37084
37085 /* configuration table */
37086 struct pmcraid_config_table *cfg_table;
37087@@ -777,7 +777,7 @@ struct pmcraid_instance {
37088 atomic_t outstanding_cmds;
37089
37090 /* should add/delete resources to mid-layer now ?*/
37091- atomic_t expose_resources;
37092+ atomic_unchecked_t expose_resources;
37093
37094
37095
37096@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
37097 struct pmcraid_config_table_entry_ext cfg_entry_ext;
37098 };
37099 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37100- atomic_t read_failures; /* count of failed READ commands */
37101- atomic_t write_failures; /* count of failed WRITE commands */
37102+ atomic_unchecked_t read_failures; /* count of failed READ commands */
37103+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37104
37105 /* To indicate add/delete/modify during CCN */
37106 u8 change_detected;
37107diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
37108index af1003f..be55a75 100644
37109--- a/drivers/scsi/qla2xxx/qla_def.h
37110+++ b/drivers/scsi/qla2xxx/qla_def.h
37111@@ -2247,7 +2247,7 @@ struct isp_operations {
37112 int (*start_scsi) (srb_t *);
37113 int (*abort_isp) (struct scsi_qla_host *);
37114 int (*iospace_config)(struct qla_hw_data*);
37115-};
37116+} __no_const;
37117
37118 /* MSI-X Support *************************************************************/
37119
37120diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
37121index bfe6854..ceac088 100644
37122--- a/drivers/scsi/qla4xxx/ql4_def.h
37123+++ b/drivers/scsi/qla4xxx/ql4_def.h
37124@@ -261,7 +261,7 @@ struct ddb_entry {
37125 * (4000 only) */
37126 atomic_t relogin_timer; /* Max Time to wait for
37127 * relogin to complete */
37128- atomic_t relogin_retry_count; /* Num of times relogin has been
37129+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37130 * retried */
37131 uint32_t default_time2wait; /* Default Min time between
37132 * relogins (+aens) */
37133diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
37134index ce6d3b7..73fac54 100644
37135--- a/drivers/scsi/qla4xxx/ql4_os.c
37136+++ b/drivers/scsi/qla4xxx/ql4_os.c
37137@@ -2178,12 +2178,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
37138 */
37139 if (!iscsi_is_session_online(cls_sess)) {
37140 /* Reset retry relogin timer */
37141- atomic_inc(&ddb_entry->relogin_retry_count);
37142+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37143 DEBUG2(ql4_printk(KERN_INFO, ha,
37144 "%s: index[%d] relogin timed out-retrying"
37145 " relogin (%d), retry (%d)\n", __func__,
37146 ddb_entry->fw_ddb_index,
37147- atomic_read(&ddb_entry->relogin_retry_count),
37148+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
37149 ddb_entry->default_time2wait + 4));
37150 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
37151 atomic_set(&ddb_entry->retry_relogin_timer,
37152@@ -3953,7 +3953,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
37153
37154 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37155 atomic_set(&ddb_entry->relogin_timer, 0);
37156- atomic_set(&ddb_entry->relogin_retry_count, 0);
37157+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37158 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
37159 ddb_entry->default_relogin_timeout =
37160 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
37161diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
37162index 2aeb2e9..46e3925 100644
37163--- a/drivers/scsi/scsi.c
37164+++ b/drivers/scsi/scsi.c
37165@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
37166 unsigned long timeout;
37167 int rtn = 0;
37168
37169- atomic_inc(&cmd->device->iorequest_cnt);
37170+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37171
37172 /* check if the device is still usable */
37173 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37174diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
37175index b2c95db..227d74e 100644
37176--- a/drivers/scsi/scsi_lib.c
37177+++ b/drivers/scsi/scsi_lib.c
37178@@ -1411,7 +1411,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
37179 shost = sdev->host;
37180 scsi_init_cmd_errh(cmd);
37181 cmd->result = DID_NO_CONNECT << 16;
37182- atomic_inc(&cmd->device->iorequest_cnt);
37183+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37184
37185 /*
37186 * SCSI request completion path will do scsi_device_unbusy(),
37187@@ -1437,9 +1437,9 @@ static void scsi_softirq_done(struct request *rq)
37188
37189 INIT_LIST_HEAD(&cmd->eh_entry);
37190
37191- atomic_inc(&cmd->device->iodone_cnt);
37192+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
37193 if (cmd->result)
37194- atomic_inc(&cmd->device->ioerr_cnt);
37195+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37196
37197 disposition = scsi_decide_disposition(cmd);
37198 if (disposition != SUCCESS &&
37199diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
37200index 04c2a27..9d8bd66 100644
37201--- a/drivers/scsi/scsi_sysfs.c
37202+++ b/drivers/scsi/scsi_sysfs.c
37203@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
37204 char *buf) \
37205 { \
37206 struct scsi_device *sdev = to_scsi_device(dev); \
37207- unsigned long long count = atomic_read(&sdev->field); \
37208+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
37209 return snprintf(buf, 20, "0x%llx\n", count); \
37210 } \
37211 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37212diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
37213index 84a1fdf..693b0d6 100644
37214--- a/drivers/scsi/scsi_tgt_lib.c
37215+++ b/drivers/scsi/scsi_tgt_lib.c
37216@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
37217 int err;
37218
37219 dprintk("%lx %u\n", uaddr, len);
37220- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
37221+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
37222 if (err) {
37223 /*
37224 * TODO: need to fixup sg_tablesize, max_segment_size,
37225diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
37226index f59d4a0..1d89407 100644
37227--- a/drivers/scsi/scsi_transport_fc.c
37228+++ b/drivers/scsi/scsi_transport_fc.c
37229@@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
37230 * Netlink Infrastructure
37231 */
37232
37233-static atomic_t fc_event_seq;
37234+static atomic_unchecked_t fc_event_seq;
37235
37236 /**
37237 * fc_get_event_number - Obtain the next sequential FC event number
37238@@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
37239 u32
37240 fc_get_event_number(void)
37241 {
37242- return atomic_add_return(1, &fc_event_seq);
37243+ return atomic_add_return_unchecked(1, &fc_event_seq);
37244 }
37245 EXPORT_SYMBOL(fc_get_event_number);
37246
37247@@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
37248 {
37249 int error;
37250
37251- atomic_set(&fc_event_seq, 0);
37252+ atomic_set_unchecked(&fc_event_seq, 0);
37253
37254 error = transport_class_register(&fc_host_class);
37255 if (error)
37256@@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
37257 char *cp;
37258
37259 *val = simple_strtoul(buf, &cp, 0);
37260- if ((*cp && (*cp != '\n')) || (*val < 0))
37261+ if (*cp && (*cp != '\n'))
37262 return -EINVAL;
37263 /*
37264 * Check for overflow; dev_loss_tmo is u32
37265diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
37266index e3e3c7d..ebdab62 100644
37267--- a/drivers/scsi/scsi_transport_iscsi.c
37268+++ b/drivers/scsi/scsi_transport_iscsi.c
37269@@ -79,7 +79,7 @@ struct iscsi_internal {
37270 struct transport_container session_cont;
37271 };
37272
37273-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37274+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37275 static struct workqueue_struct *iscsi_eh_timer_workq;
37276
37277 static DEFINE_IDA(iscsi_sess_ida);
37278@@ -1063,7 +1063,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
37279 int err;
37280
37281 ihost = shost->shost_data;
37282- session->sid = atomic_add_return(1, &iscsi_session_nr);
37283+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37284
37285 if (target_id == ISCSI_MAX_TARGET) {
37286 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
37287@@ -2680,7 +2680,7 @@ static __init int iscsi_transport_init(void)
37288 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37289 ISCSI_TRANSPORT_VERSION);
37290
37291- atomic_set(&iscsi_session_nr, 0);
37292+ atomic_set_unchecked(&iscsi_session_nr, 0);
37293
37294 err = class_register(&iscsi_transport_class);
37295 if (err)
37296diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
37297index 21a045e..ec89e03 100644
37298--- a/drivers/scsi/scsi_transport_srp.c
37299+++ b/drivers/scsi/scsi_transport_srp.c
37300@@ -33,7 +33,7 @@
37301 #include "scsi_transport_srp_internal.h"
37302
37303 struct srp_host_attrs {
37304- atomic_t next_port_id;
37305+ atomic_unchecked_t next_port_id;
37306 };
37307 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37308
37309@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
37310 struct Scsi_Host *shost = dev_to_shost(dev);
37311 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37312
37313- atomic_set(&srp_host->next_port_id, 0);
37314+ atomic_set_unchecked(&srp_host->next_port_id, 0);
37315 return 0;
37316 }
37317
37318@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
37319 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37320 rport->roles = ids->roles;
37321
37322- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37323+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37324 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37325
37326 transport_setup_device(&rport->dev);
37327diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
37328index eacd46b..e3f4d62 100644
37329--- a/drivers/scsi/sg.c
37330+++ b/drivers/scsi/sg.c
37331@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
37332 sdp->disk->disk_name,
37333 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
37334 NULL,
37335- (char *)arg);
37336+ (char __user *)arg);
37337 case BLKTRACESTART:
37338 return blk_trace_startstop(sdp->device->request_queue, 1);
37339 case BLKTRACESTOP:
37340@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
37341 const struct file_operations * fops;
37342 };
37343
37344-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37345+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37346 {"allow_dio", &adio_fops},
37347 {"debug", &debug_fops},
37348 {"def_reserved_size", &dressz_fops},
37349@@ -2332,7 +2332,7 @@ sg_proc_init(void)
37350 if (!sg_proc_sgp)
37351 return 1;
37352 for (k = 0; k < num_leaves; ++k) {
37353- struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37354+ const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37355 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
37356 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
37357 }
37358diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
37359index f64250e..1ee3049 100644
37360--- a/drivers/spi/spi-dw-pci.c
37361+++ b/drivers/spi/spi-dw-pci.c
37362@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
37363 #define spi_resume NULL
37364 #endif
37365
37366-static const struct pci_device_id pci_ids[] __devinitdata = {
37367+static const struct pci_device_id pci_ids[] __devinitconst = {
37368 /* Intel MID platform SPI controller 0 */
37369 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
37370 {},
37371diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
37372index b2ccdea..84cde75 100644
37373--- a/drivers/spi/spi.c
37374+++ b/drivers/spi/spi.c
37375@@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
37376 EXPORT_SYMBOL_GPL(spi_bus_unlock);
37377
37378 /* portable code must never pass more than 32 bytes */
37379-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37380+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
37381
37382 static u8 *buf;
37383
37384diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37385index 400df8c..065d4f4 100644
37386--- a/drivers/staging/octeon/ethernet-rx.c
37387+++ b/drivers/staging/octeon/ethernet-rx.c
37388@@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37389 /* Increment RX stats for virtual ports */
37390 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37391 #ifdef CONFIG_64BIT
37392- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37393- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37394+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37395+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37396 #else
37397- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37398- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37399+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37400+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37401 #endif
37402 }
37403 netif_receive_skb(skb);
37404@@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37405 dev->name);
37406 */
37407 #ifdef CONFIG_64BIT
37408- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37409+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37410 #else
37411- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37412+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37413 #endif
37414 dev_kfree_skb_irq(skb);
37415 }
37416diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37417index 9112cd8..92f8d51 100644
37418--- a/drivers/staging/octeon/ethernet.c
37419+++ b/drivers/staging/octeon/ethernet.c
37420@@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37421 * since the RX tasklet also increments it.
37422 */
37423 #ifdef CONFIG_64BIT
37424- atomic64_add(rx_status.dropped_packets,
37425- (atomic64_t *)&priv->stats.rx_dropped);
37426+ atomic64_add_unchecked(rx_status.dropped_packets,
37427+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37428 #else
37429- atomic_add(rx_status.dropped_packets,
37430- (atomic_t *)&priv->stats.rx_dropped);
37431+ atomic_add_unchecked(rx_status.dropped_packets,
37432+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
37433 #endif
37434 }
37435
37436diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
37437index f9dae95..ff48901 100644
37438--- a/drivers/staging/rtl8192e/rtllib_module.c
37439+++ b/drivers/staging/rtl8192e/rtllib_module.c
37440@@ -215,6 +215,8 @@ static int show_debug_level(char *page, char **start, off_t offset,
37441 }
37442
37443 static int store_debug_level(struct file *file, const char __user *buffer,
37444+ unsigned long count, void *data) __size_overflow(3);
37445+static int store_debug_level(struct file *file, const char __user *buffer,
37446 unsigned long count, void *data)
37447 {
37448 char buf[] = "0x00000000";
37449diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
37450index e3d47bc..85f4d0d 100644
37451--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
37452+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
37453@@ -250,6 +250,8 @@ static int show_debug_level(char *page, char **start, off_t offset,
37454 }
37455
37456 static int store_debug_level(struct file *file, const char *buffer,
37457+ unsigned long count, void *data) __size_overflow(3);
37458+static int store_debug_level(struct file *file, const char *buffer,
37459 unsigned long count, void *data)
37460 {
37461 char buf[] = "0x00000000";
37462diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37463index 86308a0..feaa925 100644
37464--- a/drivers/staging/rtl8712/rtl871x_io.h
37465+++ b/drivers/staging/rtl8712/rtl871x_io.h
37466@@ -108,7 +108,7 @@ struct _io_ops {
37467 u8 *pmem);
37468 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37469 u8 *pmem);
37470-};
37471+} __no_const;
37472
37473 struct io_req {
37474 struct list_head list;
37475diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37476index c7b5e8b..783d6cb 100644
37477--- a/drivers/staging/sbe-2t3e3/netdev.c
37478+++ b/drivers/staging/sbe-2t3e3/netdev.c
37479@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37480 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37481
37482 if (rlen)
37483- if (copy_to_user(data, &resp, rlen))
37484+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37485 return -EFAULT;
37486
37487 return 0;
37488diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37489index 42cdafe..2769103 100644
37490--- a/drivers/staging/speakup/speakup_soft.c
37491+++ b/drivers/staging/speakup/speakup_soft.c
37492@@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37493 break;
37494 } else if (!initialized) {
37495 if (*init) {
37496- ch = *init;
37497 init++;
37498 } else {
37499 initialized = 1;
37500 }
37501+ ch = *init;
37502 } else {
37503 ch = synth_buffer_getc();
37504 }
37505diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37506index b8f8c48..1fc5025 100644
37507--- a/drivers/staging/usbip/usbip_common.h
37508+++ b/drivers/staging/usbip/usbip_common.h
37509@@ -289,7 +289,7 @@ struct usbip_device {
37510 void (*shutdown)(struct usbip_device *);
37511 void (*reset)(struct usbip_device *);
37512 void (*unusable)(struct usbip_device *);
37513- } eh_ops;
37514+ } __no_const eh_ops;
37515 };
37516
37517 /* usbip_common.c */
37518diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37519index 88b3298..3783eee 100644
37520--- a/drivers/staging/usbip/vhci.h
37521+++ b/drivers/staging/usbip/vhci.h
37522@@ -88,7 +88,7 @@ struct vhci_hcd {
37523 unsigned resuming:1;
37524 unsigned long re_timeout;
37525
37526- atomic_t seqnum;
37527+ atomic_unchecked_t seqnum;
37528
37529 /*
37530 * NOTE:
37531diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37532index 2ee97e2..0420b86 100644
37533--- a/drivers/staging/usbip/vhci_hcd.c
37534+++ b/drivers/staging/usbip/vhci_hcd.c
37535@@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
37536 return;
37537 }
37538
37539- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37540+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37541 if (priv->seqnum == 0xffff)
37542 dev_info(&urb->dev->dev, "seqnum max\n");
37543
37544@@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37545 return -ENOMEM;
37546 }
37547
37548- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37549+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37550 if (unlink->seqnum == 0xffff)
37551 pr_info("seqnum max\n");
37552
37553@@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
37554 vdev->rhport = rhport;
37555 }
37556
37557- atomic_set(&vhci->seqnum, 0);
37558+ atomic_set_unchecked(&vhci->seqnum, 0);
37559 spin_lock_init(&vhci->lock);
37560
37561 hcd->power_budget = 0; /* no limit */
37562diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37563index 3f511b4..d3dbc1e 100644
37564--- a/drivers/staging/usbip/vhci_rx.c
37565+++ b/drivers/staging/usbip/vhci_rx.c
37566@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37567 if (!urb) {
37568 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37569 pr_info("max seqnum %d\n",
37570- atomic_read(&the_controller->seqnum));
37571+ atomic_read_unchecked(&the_controller->seqnum));
37572 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37573 return;
37574 }
37575diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37576index 7735027..30eed13 100644
37577--- a/drivers/staging/vt6655/hostap.c
37578+++ b/drivers/staging/vt6655/hostap.c
37579@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37580 *
37581 */
37582
37583+static net_device_ops_no_const apdev_netdev_ops;
37584+
37585 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37586 {
37587 PSDevice apdev_priv;
37588 struct net_device *dev = pDevice->dev;
37589 int ret;
37590- const struct net_device_ops apdev_netdev_ops = {
37591- .ndo_start_xmit = pDevice->tx_80211,
37592- };
37593
37594 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37595
37596@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37597 *apdev_priv = *pDevice;
37598 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37599
37600+ /* only half broken now */
37601+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37602 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37603
37604 pDevice->apdev->type = ARPHRD_IEEE80211;
37605diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37606index 51b5adf..098e320 100644
37607--- a/drivers/staging/vt6656/hostap.c
37608+++ b/drivers/staging/vt6656/hostap.c
37609@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37610 *
37611 */
37612
37613+static net_device_ops_no_const apdev_netdev_ops;
37614+
37615 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37616 {
37617 PSDevice apdev_priv;
37618 struct net_device *dev = pDevice->dev;
37619 int ret;
37620- const struct net_device_ops apdev_netdev_ops = {
37621- .ndo_start_xmit = pDevice->tx_80211,
37622- };
37623
37624 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37625
37626@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37627 *apdev_priv = *pDevice;
37628 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37629
37630+ /* only half broken now */
37631+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37632 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37633
37634 pDevice->apdev->type = ARPHRD_IEEE80211;
37635diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37636index 7843dfd..3db105f 100644
37637--- a/drivers/staging/wlan-ng/hfa384x_usb.c
37638+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37639@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37640
37641 struct usbctlx_completor {
37642 int (*complete) (struct usbctlx_completor *);
37643-};
37644+} __no_const;
37645
37646 static int
37647 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37648diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37649index 1ca66ea..76f1343 100644
37650--- a/drivers/staging/zcache/tmem.c
37651+++ b/drivers/staging/zcache/tmem.c
37652@@ -39,7 +39,7 @@
37653 * A tmem host implementation must use this function to register callbacks
37654 * for memory allocation.
37655 */
37656-static struct tmem_hostops tmem_hostops;
37657+static tmem_hostops_no_const tmem_hostops;
37658
37659 static void tmem_objnode_tree_init(void);
37660
37661@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37662 * A tmem host implementation must use this function to register
37663 * callbacks for a page-accessible memory (PAM) implementation
37664 */
37665-static struct tmem_pamops tmem_pamops;
37666+static tmem_pamops_no_const tmem_pamops;
37667
37668 void tmem_register_pamops(struct tmem_pamops *m)
37669 {
37670diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37671index ed147c4..94fc3c6 100644
37672--- a/drivers/staging/zcache/tmem.h
37673+++ b/drivers/staging/zcache/tmem.h
37674@@ -180,6 +180,7 @@ struct tmem_pamops {
37675 void (*new_obj)(struct tmem_obj *);
37676 int (*replace_in_obj)(void *, struct tmem_obj *);
37677 };
37678+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37679 extern void tmem_register_pamops(struct tmem_pamops *m);
37680
37681 /* memory allocation methods provided by the host implementation */
37682@@ -189,6 +190,7 @@ struct tmem_hostops {
37683 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37684 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37685 };
37686+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37687 extern void tmem_register_hostops(struct tmem_hostops *m);
37688
37689 /* core tmem accessor functions */
37690diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
37691index 97c74ee..7f6d77d 100644
37692--- a/drivers/target/iscsi/iscsi_target.c
37693+++ b/drivers/target/iscsi/iscsi_target.c
37694@@ -1361,7 +1361,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
37695 * outstanding_r2ts reaches zero, go ahead and send the delayed
37696 * TASK_ABORTED status.
37697 */
37698- if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
37699+ if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
37700 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
37701 if (--cmd->outstanding_r2ts < 1) {
37702 iscsit_stop_dataout_timer(cmd);
37703diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37704index dcb0618..97e3d85 100644
37705--- a/drivers/target/target_core_tmr.c
37706+++ b/drivers/target/target_core_tmr.c
37707@@ -260,7 +260,7 @@ static void core_tmr_drain_task_list(
37708 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37709 cmd->t_task_list_num,
37710 atomic_read(&cmd->t_task_cdbs_left),
37711- atomic_read(&cmd->t_task_cdbs_sent),
37712+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37713 atomic_read(&cmd->t_transport_active),
37714 atomic_read(&cmd->t_transport_stop),
37715 atomic_read(&cmd->t_transport_sent));
37716@@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
37717 pr_debug("LUN_RESET: got t_transport_active = 1 for"
37718 " task: %p, t_fe_count: %d dev: %p\n", task,
37719 fe_count, dev);
37720- atomic_set(&cmd->t_transport_aborted, 1);
37721+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37722 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37723
37724 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
37725@@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
37726 }
37727 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
37728 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
37729- atomic_set(&cmd->t_transport_aborted, 1);
37730+ atomic_set_unchecked(&cmd->t_transport_aborted, 1);
37731 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37732
37733 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
37734diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37735index cd5cd95..5249d30 100644
37736--- a/drivers/target/target_core_transport.c
37737+++ b/drivers/target/target_core_transport.c
37738@@ -1330,7 +1330,7 @@ struct se_device *transport_add_device_to_core_hba(
37739 spin_lock_init(&dev->se_port_lock);
37740 spin_lock_init(&dev->se_tmr_lock);
37741 spin_lock_init(&dev->qf_cmd_lock);
37742- atomic_set(&dev->dev_ordered_id, 0);
37743+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
37744
37745 se_dev_set_default_attribs(dev, dev_limits);
37746
37747@@ -1517,7 +1517,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37748 * Used to determine when ORDERED commands should go from
37749 * Dormant to Active status.
37750 */
37751- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37752+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37753 smp_mb__after_atomic_inc();
37754 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37755 cmd->se_ordered_id, cmd->sam_task_attr,
37756@@ -1862,7 +1862,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
37757 " t_transport_active: %d t_transport_stop: %d"
37758 " t_transport_sent: %d\n", cmd->t_task_list_num,
37759 atomic_read(&cmd->t_task_cdbs_left),
37760- atomic_read(&cmd->t_task_cdbs_sent),
37761+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37762 atomic_read(&cmd->t_task_cdbs_ex_left),
37763 atomic_read(&cmd->t_transport_active),
37764 atomic_read(&cmd->t_transport_stop),
37765@@ -2121,9 +2121,9 @@ check_depth:
37766 cmd = task->task_se_cmd;
37767 spin_lock_irqsave(&cmd->t_state_lock, flags);
37768 task->task_flags |= (TF_ACTIVE | TF_SENT);
37769- atomic_inc(&cmd->t_task_cdbs_sent);
37770+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37771
37772- if (atomic_read(&cmd->t_task_cdbs_sent) ==
37773+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37774 cmd->t_task_list_num)
37775 atomic_set(&cmd->t_transport_sent, 1);
37776
37777@@ -4348,7 +4348,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
37778 atomic_set(&cmd->transport_lun_stop, 0);
37779 }
37780 if (!atomic_read(&cmd->t_transport_active) ||
37781- atomic_read(&cmd->t_transport_aborted)) {
37782+ atomic_read_unchecked(&cmd->t_transport_aborted)) {
37783 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
37784 return false;
37785 }
37786@@ -4597,7 +4597,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
37787 {
37788 int ret = 0;
37789
37790- if (atomic_read(&cmd->t_transport_aborted) != 0) {
37791+ if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
37792 if (!send_status ||
37793 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
37794 return 1;
37795@@ -4634,7 +4634,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
37796 */
37797 if (cmd->data_direction == DMA_TO_DEVICE) {
37798 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
37799- atomic_inc(&cmd->t_transport_aborted);
37800+ atomic_inc_unchecked(&cmd->t_transport_aborted);
37801 smp_mb__after_atomic_inc();
37802 }
37803 }
37804diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37805index b9040be..e3f5aab 100644
37806--- a/drivers/tty/hvc/hvcs.c
37807+++ b/drivers/tty/hvc/hvcs.c
37808@@ -83,6 +83,7 @@
37809 #include <asm/hvcserver.h>
37810 #include <asm/uaccess.h>
37811 #include <asm/vio.h>
37812+#include <asm/local.h>
37813
37814 /*
37815 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37816@@ -270,7 +271,7 @@ struct hvcs_struct {
37817 unsigned int index;
37818
37819 struct tty_struct *tty;
37820- int open_count;
37821+ local_t open_count;
37822
37823 /*
37824 * Used to tell the driver kernel_thread what operations need to take
37825@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37826
37827 spin_lock_irqsave(&hvcsd->lock, flags);
37828
37829- if (hvcsd->open_count > 0) {
37830+ if (local_read(&hvcsd->open_count) > 0) {
37831 spin_unlock_irqrestore(&hvcsd->lock, flags);
37832 printk(KERN_INFO "HVCS: vterm state unchanged. "
37833 "The hvcs device node is still in use.\n");
37834@@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37835 if ((retval = hvcs_partner_connect(hvcsd)))
37836 goto error_release;
37837
37838- hvcsd->open_count = 1;
37839+ local_set(&hvcsd->open_count, 1);
37840 hvcsd->tty = tty;
37841 tty->driver_data = hvcsd;
37842
37843@@ -1179,7 +1180,7 @@ fast_open:
37844
37845 spin_lock_irqsave(&hvcsd->lock, flags);
37846 kref_get(&hvcsd->kref);
37847- hvcsd->open_count++;
37848+ local_inc(&hvcsd->open_count);
37849 hvcsd->todo_mask |= HVCS_SCHED_READ;
37850 spin_unlock_irqrestore(&hvcsd->lock, flags);
37851
37852@@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37853 hvcsd = tty->driver_data;
37854
37855 spin_lock_irqsave(&hvcsd->lock, flags);
37856- if (--hvcsd->open_count == 0) {
37857+ if (local_dec_and_test(&hvcsd->open_count)) {
37858
37859 vio_disable_interrupts(hvcsd->vdev);
37860
37861@@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37862 free_irq(irq, hvcsd);
37863 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37864 return;
37865- } else if (hvcsd->open_count < 0) {
37866+ } else if (local_read(&hvcsd->open_count) < 0) {
37867 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37868 " is missmanaged.\n",
37869- hvcsd->vdev->unit_address, hvcsd->open_count);
37870+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37871 }
37872
37873 spin_unlock_irqrestore(&hvcsd->lock, flags);
37874@@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37875
37876 spin_lock_irqsave(&hvcsd->lock, flags);
37877 /* Preserve this so that we know how many kref refs to put */
37878- temp_open_count = hvcsd->open_count;
37879+ temp_open_count = local_read(&hvcsd->open_count);
37880
37881 /*
37882 * Don't kref put inside the spinlock because the destruction
37883@@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37884 hvcsd->tty->driver_data = NULL;
37885 hvcsd->tty = NULL;
37886
37887- hvcsd->open_count = 0;
37888+ local_set(&hvcsd->open_count, 0);
37889
37890 /* This will drop any buffered data on the floor which is OK in a hangup
37891 * scenario. */
37892@@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
37893 * the middle of a write operation? This is a crummy place to do this
37894 * but we want to keep it all in the spinlock.
37895 */
37896- if (hvcsd->open_count <= 0) {
37897+ if (local_read(&hvcsd->open_count) <= 0) {
37898 spin_unlock_irqrestore(&hvcsd->lock, flags);
37899 return -ENODEV;
37900 }
37901@@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37902 {
37903 struct hvcs_struct *hvcsd = tty->driver_data;
37904
37905- if (!hvcsd || hvcsd->open_count <= 0)
37906+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37907 return 0;
37908
37909 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37910diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37911index ef92869..f4ebd88 100644
37912--- a/drivers/tty/ipwireless/tty.c
37913+++ b/drivers/tty/ipwireless/tty.c
37914@@ -29,6 +29,7 @@
37915 #include <linux/tty_driver.h>
37916 #include <linux/tty_flip.h>
37917 #include <linux/uaccess.h>
37918+#include <asm/local.h>
37919
37920 #include "tty.h"
37921 #include "network.h"
37922@@ -51,7 +52,7 @@ struct ipw_tty {
37923 int tty_type;
37924 struct ipw_network *network;
37925 struct tty_struct *linux_tty;
37926- int open_count;
37927+ local_t open_count;
37928 unsigned int control_lines;
37929 struct mutex ipw_tty_mutex;
37930 int tx_bytes_queued;
37931@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37932 mutex_unlock(&tty->ipw_tty_mutex);
37933 return -ENODEV;
37934 }
37935- if (tty->open_count == 0)
37936+ if (local_read(&tty->open_count) == 0)
37937 tty->tx_bytes_queued = 0;
37938
37939- tty->open_count++;
37940+ local_inc(&tty->open_count);
37941
37942 tty->linux_tty = linux_tty;
37943 linux_tty->driver_data = tty;
37944@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37945
37946 static void do_ipw_close(struct ipw_tty *tty)
37947 {
37948- tty->open_count--;
37949-
37950- if (tty->open_count == 0) {
37951+ if (local_dec_return(&tty->open_count) == 0) {
37952 struct tty_struct *linux_tty = tty->linux_tty;
37953
37954 if (linux_tty != NULL) {
37955@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37956 return;
37957
37958 mutex_lock(&tty->ipw_tty_mutex);
37959- if (tty->open_count == 0) {
37960+ if (local_read(&tty->open_count) == 0) {
37961 mutex_unlock(&tty->ipw_tty_mutex);
37962 return;
37963 }
37964@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37965 return;
37966 }
37967
37968- if (!tty->open_count) {
37969+ if (!local_read(&tty->open_count)) {
37970 mutex_unlock(&tty->ipw_tty_mutex);
37971 return;
37972 }
37973@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37974 return -ENODEV;
37975
37976 mutex_lock(&tty->ipw_tty_mutex);
37977- if (!tty->open_count) {
37978+ if (!local_read(&tty->open_count)) {
37979 mutex_unlock(&tty->ipw_tty_mutex);
37980 return -EINVAL;
37981 }
37982@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37983 if (!tty)
37984 return -ENODEV;
37985
37986- if (!tty->open_count)
37987+ if (!local_read(&tty->open_count))
37988 return -EINVAL;
37989
37990 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37991@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37992 if (!tty)
37993 return 0;
37994
37995- if (!tty->open_count)
37996+ if (!local_read(&tty->open_count))
37997 return 0;
37998
37999 return tty->tx_bytes_queued;
38000@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
38001 if (!tty)
38002 return -ENODEV;
38003
38004- if (!tty->open_count)
38005+ if (!local_read(&tty->open_count))
38006 return -EINVAL;
38007
38008 return get_control_lines(tty);
38009@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
38010 if (!tty)
38011 return -ENODEV;
38012
38013- if (!tty->open_count)
38014+ if (!local_read(&tty->open_count))
38015 return -EINVAL;
38016
38017 return set_control_lines(tty, set, clear);
38018@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
38019 if (!tty)
38020 return -ENODEV;
38021
38022- if (!tty->open_count)
38023+ if (!local_read(&tty->open_count))
38024 return -EINVAL;
38025
38026 /* FIXME: Exactly how is the tty object locked here .. */
38027@@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
38028 against a parallel ioctl etc */
38029 mutex_lock(&ttyj->ipw_tty_mutex);
38030 }
38031- while (ttyj->open_count)
38032+ while (local_read(&ttyj->open_count))
38033 do_ipw_close(ttyj);
38034 ipwireless_disassociate_network_ttys(network,
38035 ttyj->channel_idx);
38036diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
38037index fc7bbba..9527e93 100644
38038--- a/drivers/tty/n_gsm.c
38039+++ b/drivers/tty/n_gsm.c
38040@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
38041 kref_init(&dlci->ref);
38042 mutex_init(&dlci->mutex);
38043 dlci->fifo = &dlci->_fifo;
38044- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
38045+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
38046 kfree(dlci);
38047 return NULL;
38048 }
38049diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
38050index d2256d0..97476fa 100644
38051--- a/drivers/tty/n_tty.c
38052+++ b/drivers/tty/n_tty.c
38053@@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
38054 {
38055 *ops = tty_ldisc_N_TTY;
38056 ops->owner = NULL;
38057- ops->refcount = ops->flags = 0;
38058+ atomic_set(&ops->refcount, 0);
38059+ ops->flags = 0;
38060 }
38061 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
38062diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
38063index d8653ab..f8afd9d 100644
38064--- a/drivers/tty/pty.c
38065+++ b/drivers/tty/pty.c
38066@@ -765,8 +765,10 @@ static void __init unix98_pty_init(void)
38067 register_sysctl_table(pty_root_table);
38068
38069 /* Now create the /dev/ptmx special device */
38070+ pax_open_kernel();
38071 tty_default_fops(&ptmx_fops);
38072- ptmx_fops.open = ptmx_open;
38073+ *(void **)&ptmx_fops.open = ptmx_open;
38074+ pax_close_kernel();
38075
38076 cdev_init(&ptmx_cdev, &ptmx_fops);
38077 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
38078diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
38079index 2b42a01..32a2ed3 100644
38080--- a/drivers/tty/serial/kgdboc.c
38081+++ b/drivers/tty/serial/kgdboc.c
38082@@ -24,8 +24,9 @@
38083 #define MAX_CONFIG_LEN 40
38084
38085 static struct kgdb_io kgdboc_io_ops;
38086+static struct kgdb_io kgdboc_io_ops_console;
38087
38088-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
38089+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
38090 static int configured = -1;
38091
38092 static char config[MAX_CONFIG_LEN];
38093@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
38094 kgdboc_unregister_kbd();
38095 if (configured == 1)
38096 kgdb_unregister_io_module(&kgdboc_io_ops);
38097+ else if (configured == 2)
38098+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
38099 }
38100
38101 static int configure_kgdboc(void)
38102@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
38103 int err;
38104 char *cptr = config;
38105 struct console *cons;
38106+ int is_console = 0;
38107
38108 err = kgdboc_option_setup(config);
38109 if (err || !strlen(config) || isspace(config[0]))
38110 goto noconfig;
38111
38112 err = -ENODEV;
38113- kgdboc_io_ops.is_console = 0;
38114 kgdb_tty_driver = NULL;
38115
38116 kgdboc_use_kms = 0;
38117@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
38118 int idx;
38119 if (cons->device && cons->device(cons, &idx) == p &&
38120 idx == tty_line) {
38121- kgdboc_io_ops.is_console = 1;
38122+ is_console = 1;
38123 break;
38124 }
38125 cons = cons->next;
38126@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
38127 kgdb_tty_line = tty_line;
38128
38129 do_register:
38130- err = kgdb_register_io_module(&kgdboc_io_ops);
38131+ if (is_console) {
38132+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
38133+ configured = 2;
38134+ } else {
38135+ err = kgdb_register_io_module(&kgdboc_io_ops);
38136+ configured = 1;
38137+ }
38138 if (err)
38139 goto noconfig;
38140
38141- configured = 1;
38142-
38143 return 0;
38144
38145 noconfig:
38146@@ -213,7 +220,7 @@ noconfig:
38147 static int __init init_kgdboc(void)
38148 {
38149 /* Already configured? */
38150- if (configured == 1)
38151+ if (configured >= 1)
38152 return 0;
38153
38154 return configure_kgdboc();
38155@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
38156 if (config[len - 1] == '\n')
38157 config[len - 1] = '\0';
38158
38159- if (configured == 1)
38160+ if (configured >= 1)
38161 cleanup_kgdboc();
38162
38163 /* Go and configure with the new params. */
38164@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
38165 .post_exception = kgdboc_post_exp_handler,
38166 };
38167
38168+static struct kgdb_io kgdboc_io_ops_console = {
38169+ .name = "kgdboc",
38170+ .read_char = kgdboc_get_char,
38171+ .write_char = kgdboc_put_char,
38172+ .pre_exception = kgdboc_pre_exp_handler,
38173+ .post_exception = kgdboc_post_exp_handler,
38174+ .is_console = 1
38175+};
38176+
38177 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
38178 /* This is only available if kgdboc is a built in for early debugging */
38179 static int __init kgdboc_early_init(char *opt)
38180diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
38181index 7867b7c..b3c119d 100644
38182--- a/drivers/tty/sysrq.c
38183+++ b/drivers/tty/sysrq.c
38184@@ -862,7 +862,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
38185 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
38186 size_t count, loff_t *ppos)
38187 {
38188- if (count) {
38189+ if (count && capable(CAP_SYS_ADMIN)) {
38190 char c;
38191
38192 if (get_user(c, buf))
38193diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
38194index e41b9bb..84002fb 100644
38195--- a/drivers/tty/tty_io.c
38196+++ b/drivers/tty/tty_io.c
38197@@ -3291,7 +3291,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
38198
38199 void tty_default_fops(struct file_operations *fops)
38200 {
38201- *fops = tty_fops;
38202+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
38203 }
38204
38205 /*
38206diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
38207index 24b95db..9c078d0 100644
38208--- a/drivers/tty/tty_ldisc.c
38209+++ b/drivers/tty/tty_ldisc.c
38210@@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
38211 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
38212 struct tty_ldisc_ops *ldo = ld->ops;
38213
38214- ldo->refcount--;
38215+ atomic_dec(&ldo->refcount);
38216 module_put(ldo->owner);
38217 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38218
38219@@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
38220 spin_lock_irqsave(&tty_ldisc_lock, flags);
38221 tty_ldiscs[disc] = new_ldisc;
38222 new_ldisc->num = disc;
38223- new_ldisc->refcount = 0;
38224+ atomic_set(&new_ldisc->refcount, 0);
38225 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38226
38227 return ret;
38228@@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
38229 return -EINVAL;
38230
38231 spin_lock_irqsave(&tty_ldisc_lock, flags);
38232- if (tty_ldiscs[disc]->refcount)
38233+ if (atomic_read(&tty_ldiscs[disc]->refcount))
38234 ret = -EBUSY;
38235 else
38236 tty_ldiscs[disc] = NULL;
38237@@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
38238 if (ldops) {
38239 ret = ERR_PTR(-EAGAIN);
38240 if (try_module_get(ldops->owner)) {
38241- ldops->refcount++;
38242+ atomic_inc(&ldops->refcount);
38243 ret = ldops;
38244 }
38245 }
38246@@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
38247 unsigned long flags;
38248
38249 spin_lock_irqsave(&tty_ldisc_lock, flags);
38250- ldops->refcount--;
38251+ atomic_dec(&ldops->refcount);
38252 module_put(ldops->owner);
38253 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38254 }
38255diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
38256index a605549..6bd3c96 100644
38257--- a/drivers/tty/vt/keyboard.c
38258+++ b/drivers/tty/vt/keyboard.c
38259@@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
38260 kbd->kbdmode == VC_OFF) &&
38261 value != KVAL(K_SAK))
38262 return; /* SAK is allowed even in raw mode */
38263+
38264+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
38265+ {
38266+ void *func = fn_handler[value];
38267+ if (func == fn_show_state || func == fn_show_ptregs ||
38268+ func == fn_show_mem)
38269+ return;
38270+ }
38271+#endif
38272+
38273 fn_handler[value](vc);
38274 }
38275
38276diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
38277index 65447c5..0526f0a 100644
38278--- a/drivers/tty/vt/vt_ioctl.c
38279+++ b/drivers/tty/vt/vt_ioctl.c
38280@@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
38281 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
38282 return -EFAULT;
38283
38284- if (!capable(CAP_SYS_TTY_CONFIG))
38285- perm = 0;
38286-
38287 switch (cmd) {
38288 case KDGKBENT:
38289 key_map = key_maps[s];
38290@@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
38291 val = (i ? K_HOLE : K_NOSUCHMAP);
38292 return put_user(val, &user_kbe->kb_value);
38293 case KDSKBENT:
38294+ if (!capable(CAP_SYS_TTY_CONFIG))
38295+ perm = 0;
38296+
38297 if (!perm)
38298 return -EPERM;
38299 if (!i && v == K_NOSUCHMAP) {
38300@@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38301 int i, j, k;
38302 int ret;
38303
38304- if (!capable(CAP_SYS_TTY_CONFIG))
38305- perm = 0;
38306-
38307 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
38308 if (!kbs) {
38309 ret = -ENOMEM;
38310@@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38311 kfree(kbs);
38312 return ((p && *p) ? -EOVERFLOW : 0);
38313 case KDSKBSENT:
38314+ if (!capable(CAP_SYS_TTY_CONFIG))
38315+ perm = 0;
38316+
38317 if (!perm) {
38318 ret = -EPERM;
38319 goto reterr;
38320diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
38321index a783d53..cb30d94 100644
38322--- a/drivers/uio/uio.c
38323+++ b/drivers/uio/uio.c
38324@@ -25,6 +25,7 @@
38325 #include <linux/kobject.h>
38326 #include <linux/cdev.h>
38327 #include <linux/uio_driver.h>
38328+#include <asm/local.h>
38329
38330 #define UIO_MAX_DEVICES (1U << MINORBITS)
38331
38332@@ -32,10 +33,10 @@ struct uio_device {
38333 struct module *owner;
38334 struct device *dev;
38335 int minor;
38336- atomic_t event;
38337+ atomic_unchecked_t event;
38338 struct fasync_struct *async_queue;
38339 wait_queue_head_t wait;
38340- int vma_count;
38341+ local_t vma_count;
38342 struct uio_info *info;
38343 struct kobject *map_dir;
38344 struct kobject *portio_dir;
38345@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
38346 struct device_attribute *attr, char *buf)
38347 {
38348 struct uio_device *idev = dev_get_drvdata(dev);
38349- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38350+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38351 }
38352
38353 static struct device_attribute uio_class_attributes[] = {
38354@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
38355 {
38356 struct uio_device *idev = info->uio_dev;
38357
38358- atomic_inc(&idev->event);
38359+ atomic_inc_unchecked(&idev->event);
38360 wake_up_interruptible(&idev->wait);
38361 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38362 }
38363@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
38364 }
38365
38366 listener->dev = idev;
38367- listener->event_count = atomic_read(&idev->event);
38368+ listener->event_count = atomic_read_unchecked(&idev->event);
38369 filep->private_data = listener;
38370
38371 if (idev->info->open) {
38372@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
38373 return -EIO;
38374
38375 poll_wait(filep, &idev->wait, wait);
38376- if (listener->event_count != atomic_read(&idev->event))
38377+ if (listener->event_count != atomic_read_unchecked(&idev->event))
38378 return POLLIN | POLLRDNORM;
38379 return 0;
38380 }
38381@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
38382 do {
38383 set_current_state(TASK_INTERRUPTIBLE);
38384
38385- event_count = atomic_read(&idev->event);
38386+ event_count = atomic_read_unchecked(&idev->event);
38387 if (event_count != listener->event_count) {
38388 if (copy_to_user(buf, &event_count, count))
38389 retval = -EFAULT;
38390@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
38391 static void uio_vma_open(struct vm_area_struct *vma)
38392 {
38393 struct uio_device *idev = vma->vm_private_data;
38394- idev->vma_count++;
38395+ local_inc(&idev->vma_count);
38396 }
38397
38398 static void uio_vma_close(struct vm_area_struct *vma)
38399 {
38400 struct uio_device *idev = vma->vm_private_data;
38401- idev->vma_count--;
38402+ local_dec(&idev->vma_count);
38403 }
38404
38405 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38406@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
38407 idev->owner = owner;
38408 idev->info = info;
38409 init_waitqueue_head(&idev->wait);
38410- atomic_set(&idev->event, 0);
38411+ atomic_set_unchecked(&idev->event, 0);
38412
38413 ret = uio_get_minor(idev);
38414 if (ret)
38415diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
38416index 98b89fe..aff824e 100644
38417--- a/drivers/usb/atm/cxacru.c
38418+++ b/drivers/usb/atm/cxacru.c
38419@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
38420 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38421 if (ret < 2)
38422 return -EINVAL;
38423- if (index < 0 || index > 0x7f)
38424+ if (index > 0x7f)
38425 return -EINVAL;
38426 pos += tmp;
38427
38428diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38429index d3448ca..d2864ca 100644
38430--- a/drivers/usb/atm/usbatm.c
38431+++ b/drivers/usb/atm/usbatm.c
38432@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38433 if (printk_ratelimit())
38434 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38435 __func__, vpi, vci);
38436- atomic_inc(&vcc->stats->rx_err);
38437+ atomic_inc_unchecked(&vcc->stats->rx_err);
38438 return;
38439 }
38440
38441@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38442 if (length > ATM_MAX_AAL5_PDU) {
38443 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38444 __func__, length, vcc);
38445- atomic_inc(&vcc->stats->rx_err);
38446+ atomic_inc_unchecked(&vcc->stats->rx_err);
38447 goto out;
38448 }
38449
38450@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38451 if (sarb->len < pdu_length) {
38452 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38453 __func__, pdu_length, sarb->len, vcc);
38454- atomic_inc(&vcc->stats->rx_err);
38455+ atomic_inc_unchecked(&vcc->stats->rx_err);
38456 goto out;
38457 }
38458
38459 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38460 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38461 __func__, vcc);
38462- atomic_inc(&vcc->stats->rx_err);
38463+ atomic_inc_unchecked(&vcc->stats->rx_err);
38464 goto out;
38465 }
38466
38467@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38468 if (printk_ratelimit())
38469 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38470 __func__, length);
38471- atomic_inc(&vcc->stats->rx_drop);
38472+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38473 goto out;
38474 }
38475
38476@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38477
38478 vcc->push(vcc, skb);
38479
38480- atomic_inc(&vcc->stats->rx);
38481+ atomic_inc_unchecked(&vcc->stats->rx);
38482 out:
38483 skb_trim(sarb, 0);
38484 }
38485@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
38486 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38487
38488 usbatm_pop(vcc, skb);
38489- atomic_inc(&vcc->stats->tx);
38490+ atomic_inc_unchecked(&vcc->stats->tx);
38491
38492 skb = skb_dequeue(&instance->sndqueue);
38493 }
38494@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
38495 if (!left--)
38496 return sprintf(page,
38497 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38498- atomic_read(&atm_dev->stats.aal5.tx),
38499- atomic_read(&atm_dev->stats.aal5.tx_err),
38500- atomic_read(&atm_dev->stats.aal5.rx),
38501- atomic_read(&atm_dev->stats.aal5.rx_err),
38502- atomic_read(&atm_dev->stats.aal5.rx_drop));
38503+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38504+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38505+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38506+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38507+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38508
38509 if (!left--) {
38510 if (instance->disconnected)
38511diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38512index d956965..4179a77 100644
38513--- a/drivers/usb/core/devices.c
38514+++ b/drivers/usb/core/devices.c
38515@@ -126,7 +126,7 @@ static const char format_endpt[] =
38516 * time it gets called.
38517 */
38518 static struct device_connect_event {
38519- atomic_t count;
38520+ atomic_unchecked_t count;
38521 wait_queue_head_t wait;
38522 } device_event = {
38523 .count = ATOMIC_INIT(1),
38524@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38525
38526 void usbfs_conn_disc_event(void)
38527 {
38528- atomic_add(2, &device_event.count);
38529+ atomic_add_unchecked(2, &device_event.count);
38530 wake_up(&device_event.wait);
38531 }
38532
38533@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38534
38535 poll_wait(file, &device_event.wait, wait);
38536
38537- event_count = atomic_read(&device_event.count);
38538+ event_count = atomic_read_unchecked(&device_event.count);
38539 if (file->f_version != event_count) {
38540 file->f_version = event_count;
38541 return POLLIN | POLLRDNORM;
38542diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38543index 1fc8f12..20647c1 100644
38544--- a/drivers/usb/early/ehci-dbgp.c
38545+++ b/drivers/usb/early/ehci-dbgp.c
38546@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38547
38548 #ifdef CONFIG_KGDB
38549 static struct kgdb_io kgdbdbgp_io_ops;
38550-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38551+static struct kgdb_io kgdbdbgp_io_ops_console;
38552+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38553 #else
38554 #define dbgp_kgdb_mode (0)
38555 #endif
38556@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38557 .write_char = kgdbdbgp_write_char,
38558 };
38559
38560+static struct kgdb_io kgdbdbgp_io_ops_console = {
38561+ .name = "kgdbdbgp",
38562+ .read_char = kgdbdbgp_read_char,
38563+ .write_char = kgdbdbgp_write_char,
38564+ .is_console = 1
38565+};
38566+
38567 static int kgdbdbgp_wait_time;
38568
38569 static int __init kgdbdbgp_parse_config(char *str)
38570@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38571 ptr++;
38572 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38573 }
38574- kgdb_register_io_module(&kgdbdbgp_io_ops);
38575- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38576+ if (early_dbgp_console.index != -1)
38577+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38578+ else
38579+ kgdb_register_io_module(&kgdbdbgp_io_ops);
38580
38581 return 0;
38582 }
38583diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38584index d6bea3e..60b250e 100644
38585--- a/drivers/usb/wusbcore/wa-hc.h
38586+++ b/drivers/usb/wusbcore/wa-hc.h
38587@@ -192,7 +192,7 @@ struct wahc {
38588 struct list_head xfer_delayed_list;
38589 spinlock_t xfer_list_lock;
38590 struct work_struct xfer_work;
38591- atomic_t xfer_id_count;
38592+ atomic_unchecked_t xfer_id_count;
38593 };
38594
38595
38596@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38597 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38598 spin_lock_init(&wa->xfer_list_lock);
38599 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38600- atomic_set(&wa->xfer_id_count, 1);
38601+ atomic_set_unchecked(&wa->xfer_id_count, 1);
38602 }
38603
38604 /**
38605diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38606index 57c01ab..8a05959 100644
38607--- a/drivers/usb/wusbcore/wa-xfer.c
38608+++ b/drivers/usb/wusbcore/wa-xfer.c
38609@@ -296,7 +296,7 @@ out:
38610 */
38611 static void wa_xfer_id_init(struct wa_xfer *xfer)
38612 {
38613- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38614+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38615 }
38616
38617 /*
38618diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38619index c14c42b..f955cc2 100644
38620--- a/drivers/vhost/vhost.c
38621+++ b/drivers/vhost/vhost.c
38622@@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38623 return 0;
38624 }
38625
38626-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38627+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38628 {
38629 struct file *eventfp, *filep = NULL,
38630 *pollstart = NULL, *pollstop = NULL;
38631diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38632index b0b2ac3..89a4399 100644
38633--- a/drivers/video/aty/aty128fb.c
38634+++ b/drivers/video/aty/aty128fb.c
38635@@ -148,7 +148,7 @@ enum {
38636 };
38637
38638 /* Must match above enum */
38639-static const char *r128_family[] __devinitdata = {
38640+static const char *r128_family[] __devinitconst = {
38641 "AGP",
38642 "PCI",
38643 "PRO AGP",
38644diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38645index 5c3960d..15cf8fc 100644
38646--- a/drivers/video/fbcmap.c
38647+++ b/drivers/video/fbcmap.c
38648@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38649 rc = -ENODEV;
38650 goto out;
38651 }
38652- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38653- !info->fbops->fb_setcmap)) {
38654+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38655 rc = -EINVAL;
38656 goto out1;
38657 }
38658diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38659index c6ce416..3b9b642 100644
38660--- a/drivers/video/fbmem.c
38661+++ b/drivers/video/fbmem.c
38662@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38663 image->dx += image->width + 8;
38664 }
38665 } else if (rotate == FB_ROTATE_UD) {
38666- for (x = 0; x < num && image->dx >= 0; x++) {
38667+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38668 info->fbops->fb_imageblit(info, image);
38669 image->dx -= image->width + 8;
38670 }
38671@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38672 image->dy += image->height + 8;
38673 }
38674 } else if (rotate == FB_ROTATE_CCW) {
38675- for (x = 0; x < num && image->dy >= 0; x++) {
38676+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38677 info->fbops->fb_imageblit(info, image);
38678 image->dy -= image->height + 8;
38679 }
38680@@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38681 return -EFAULT;
38682 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38683 return -EINVAL;
38684- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38685+ if (con2fb.framebuffer >= FB_MAX)
38686 return -EINVAL;
38687 if (!registered_fb[con2fb.framebuffer])
38688 request_module("fb%d", con2fb.framebuffer);
38689diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38690index 5a5d092..265c5ed 100644
38691--- a/drivers/video/geode/gx1fb_core.c
38692+++ b/drivers/video/geode/gx1fb_core.c
38693@@ -29,7 +29,7 @@ static int crt_option = 1;
38694 static char panel_option[32] = "";
38695
38696 /* Modes relevant to the GX1 (taken from modedb.c) */
38697-static const struct fb_videomode __devinitdata gx1_modedb[] = {
38698+static const struct fb_videomode __devinitconst gx1_modedb[] = {
38699 /* 640x480-60 VESA */
38700 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38701 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38702diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38703index 0fad23f..0e9afa4 100644
38704--- a/drivers/video/gxt4500.c
38705+++ b/drivers/video/gxt4500.c
38706@@ -156,7 +156,7 @@ struct gxt4500_par {
38707 static char *mode_option;
38708
38709 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38710-static const struct fb_videomode defaultmode __devinitdata = {
38711+static const struct fb_videomode defaultmode __devinitconst = {
38712 .refresh = 60,
38713 .xres = 1280,
38714 .yres = 1024,
38715@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38716 return 0;
38717 }
38718
38719-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38720+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38721 .id = "IBM GXT4500P",
38722 .type = FB_TYPE_PACKED_PIXELS,
38723 .visual = FB_VISUAL_PSEUDOCOLOR,
38724diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38725index 7672d2e..b56437f 100644
38726--- a/drivers/video/i810/i810_accel.c
38727+++ b/drivers/video/i810/i810_accel.c
38728@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38729 }
38730 }
38731 printk("ringbuffer lockup!!!\n");
38732+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38733 i810_report_error(mmio);
38734 par->dev_flags |= LOCKUP;
38735 info->pixmap.scan_align = 1;
38736diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38737index b83f361..2b05a91 100644
38738--- a/drivers/video/i810/i810_main.c
38739+++ b/drivers/video/i810/i810_main.c
38740@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38741 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38742
38743 /* PCI */
38744-static const char *i810_pci_list[] __devinitdata = {
38745+static const char *i810_pci_list[] __devinitconst = {
38746 "Intel(R) 810 Framebuffer Device" ,
38747 "Intel(R) 810-DC100 Framebuffer Device" ,
38748 "Intel(R) 810E Framebuffer Device" ,
38749diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38750index de36693..3c63fc2 100644
38751--- a/drivers/video/jz4740_fb.c
38752+++ b/drivers/video/jz4740_fb.c
38753@@ -136,7 +136,7 @@ struct jzfb {
38754 uint32_t pseudo_palette[16];
38755 };
38756
38757-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38758+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38759 .id = "JZ4740 FB",
38760 .type = FB_TYPE_PACKED_PIXELS,
38761 .visual = FB_VISUAL_TRUECOLOR,
38762diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38763index 3c14e43..eafa544 100644
38764--- a/drivers/video/logo/logo_linux_clut224.ppm
38765+++ b/drivers/video/logo/logo_linux_clut224.ppm
38766@@ -1,1604 +1,1123 @@
38767 P3
38768-# Standard 224-color Linux logo
38769 80 80
38770 255
38771- 0 0 0 0 0 0 0 0 0 0 0 0
38772- 0 0 0 0 0 0 0 0 0 0 0 0
38773- 0 0 0 0 0 0 0 0 0 0 0 0
38774- 0 0 0 0 0 0 0 0 0 0 0 0
38775- 0 0 0 0 0 0 0 0 0 0 0 0
38776- 0 0 0 0 0 0 0 0 0 0 0 0
38777- 0 0 0 0 0 0 0 0 0 0 0 0
38778- 0 0 0 0 0 0 0 0 0 0 0 0
38779- 0 0 0 0 0 0 0 0 0 0 0 0
38780- 6 6 6 6 6 6 10 10 10 10 10 10
38781- 10 10 10 6 6 6 6 6 6 6 6 6
38782- 0 0 0 0 0 0 0 0 0 0 0 0
38783- 0 0 0 0 0 0 0 0 0 0 0 0
38784- 0 0 0 0 0 0 0 0 0 0 0 0
38785- 0 0 0 0 0 0 0 0 0 0 0 0
38786- 0 0 0 0 0 0 0 0 0 0 0 0
38787- 0 0 0 0 0 0 0 0 0 0 0 0
38788- 0 0 0 0 0 0 0 0 0 0 0 0
38789- 0 0 0 0 0 0 0 0 0 0 0 0
38790- 0 0 0 0 0 0 0 0 0 0 0 0
38791- 0 0 0 0 0 0 0 0 0 0 0 0
38792- 0 0 0 0 0 0 0 0 0 0 0 0
38793- 0 0 0 0 0 0 0 0 0 0 0 0
38794- 0 0 0 0 0 0 0 0 0 0 0 0
38795- 0 0 0 0 0 0 0 0 0 0 0 0
38796- 0 0 0 0 0 0 0 0 0 0 0 0
38797- 0 0 0 0 0 0 0 0 0 0 0 0
38798- 0 0 0 0 0 0 0 0 0 0 0 0
38799- 0 0 0 6 6 6 10 10 10 14 14 14
38800- 22 22 22 26 26 26 30 30 30 34 34 34
38801- 30 30 30 30 30 30 26 26 26 18 18 18
38802- 14 14 14 10 10 10 6 6 6 0 0 0
38803- 0 0 0 0 0 0 0 0 0 0 0 0
38804- 0 0 0 0 0 0 0 0 0 0 0 0
38805- 0 0 0 0 0 0 0 0 0 0 0 0
38806- 0 0 0 0 0 0 0 0 0 0 0 0
38807- 0 0 0 0 0 0 0 0 0 0 0 0
38808- 0 0 0 0 0 0 0 0 0 0 0 0
38809- 0 0 0 0 0 0 0 0 0 0 0 0
38810- 0 0 0 0 0 0 0 0 0 0 0 0
38811- 0 0 0 0 0 0 0 0 0 0 0 0
38812- 0 0 0 0 0 1 0 0 1 0 0 0
38813- 0 0 0 0 0 0 0 0 0 0 0 0
38814- 0 0 0 0 0 0 0 0 0 0 0 0
38815- 0 0 0 0 0 0 0 0 0 0 0 0
38816- 0 0 0 0 0 0 0 0 0 0 0 0
38817- 0 0 0 0 0 0 0 0 0 0 0 0
38818- 0 0 0 0 0 0 0 0 0 0 0 0
38819- 6 6 6 14 14 14 26 26 26 42 42 42
38820- 54 54 54 66 66 66 78 78 78 78 78 78
38821- 78 78 78 74 74 74 66 66 66 54 54 54
38822- 42 42 42 26 26 26 18 18 18 10 10 10
38823- 6 6 6 0 0 0 0 0 0 0 0 0
38824- 0 0 0 0 0 0 0 0 0 0 0 0
38825- 0 0 0 0 0 0 0 0 0 0 0 0
38826- 0 0 0 0 0 0 0 0 0 0 0 0
38827- 0 0 0 0 0 0 0 0 0 0 0 0
38828- 0 0 0 0 0 0 0 0 0 0 0 0
38829- 0 0 0 0 0 0 0 0 0 0 0 0
38830- 0 0 0 0 0 0 0 0 0 0 0 0
38831- 0 0 0 0 0 0 0 0 0 0 0 0
38832- 0 0 1 0 0 0 0 0 0 0 0 0
38833- 0 0 0 0 0 0 0 0 0 0 0 0
38834- 0 0 0 0 0 0 0 0 0 0 0 0
38835- 0 0 0 0 0 0 0 0 0 0 0 0
38836- 0 0 0 0 0 0 0 0 0 0 0 0
38837- 0 0 0 0 0 0 0 0 0 0 0 0
38838- 0 0 0 0 0 0 0 0 0 10 10 10
38839- 22 22 22 42 42 42 66 66 66 86 86 86
38840- 66 66 66 38 38 38 38 38 38 22 22 22
38841- 26 26 26 34 34 34 54 54 54 66 66 66
38842- 86 86 86 70 70 70 46 46 46 26 26 26
38843- 14 14 14 6 6 6 0 0 0 0 0 0
38844- 0 0 0 0 0 0 0 0 0 0 0 0
38845- 0 0 0 0 0 0 0 0 0 0 0 0
38846- 0 0 0 0 0 0 0 0 0 0 0 0
38847- 0 0 0 0 0 0 0 0 0 0 0 0
38848- 0 0 0 0 0 0 0 0 0 0 0 0
38849- 0 0 0 0 0 0 0 0 0 0 0 0
38850- 0 0 0 0 0 0 0 0 0 0 0 0
38851- 0 0 0 0 0 0 0 0 0 0 0 0
38852- 0 0 1 0 0 1 0 0 1 0 0 0
38853- 0 0 0 0 0 0 0 0 0 0 0 0
38854- 0 0 0 0 0 0 0 0 0 0 0 0
38855- 0 0 0 0 0 0 0 0 0 0 0 0
38856- 0 0 0 0 0 0 0 0 0 0 0 0
38857- 0 0 0 0 0 0 0 0 0 0 0 0
38858- 0 0 0 0 0 0 10 10 10 26 26 26
38859- 50 50 50 82 82 82 58 58 58 6 6 6
38860- 2 2 6 2 2 6 2 2 6 2 2 6
38861- 2 2 6 2 2 6 2 2 6 2 2 6
38862- 6 6 6 54 54 54 86 86 86 66 66 66
38863- 38 38 38 18 18 18 6 6 6 0 0 0
38864- 0 0 0 0 0 0 0 0 0 0 0 0
38865- 0 0 0 0 0 0 0 0 0 0 0 0
38866- 0 0 0 0 0 0 0 0 0 0 0 0
38867- 0 0 0 0 0 0 0 0 0 0 0 0
38868- 0 0 0 0 0 0 0 0 0 0 0 0
38869- 0 0 0 0 0 0 0 0 0 0 0 0
38870- 0 0 0 0 0 0 0 0 0 0 0 0
38871- 0 0 0 0 0 0 0 0 0 0 0 0
38872- 0 0 0 0 0 0 0 0 0 0 0 0
38873- 0 0 0 0 0 0 0 0 0 0 0 0
38874- 0 0 0 0 0 0 0 0 0 0 0 0
38875- 0 0 0 0 0 0 0 0 0 0 0 0
38876- 0 0 0 0 0 0 0 0 0 0 0 0
38877- 0 0 0 0 0 0 0 0 0 0 0 0
38878- 0 0 0 6 6 6 22 22 22 50 50 50
38879- 78 78 78 34 34 34 2 2 6 2 2 6
38880- 2 2 6 2 2 6 2 2 6 2 2 6
38881- 2 2 6 2 2 6 2 2 6 2 2 6
38882- 2 2 6 2 2 6 6 6 6 70 70 70
38883- 78 78 78 46 46 46 22 22 22 6 6 6
38884- 0 0 0 0 0 0 0 0 0 0 0 0
38885- 0 0 0 0 0 0 0 0 0 0 0 0
38886- 0 0 0 0 0 0 0 0 0 0 0 0
38887- 0 0 0 0 0 0 0 0 0 0 0 0
38888- 0 0 0 0 0 0 0 0 0 0 0 0
38889- 0 0 0 0 0 0 0 0 0 0 0 0
38890- 0 0 0 0 0 0 0 0 0 0 0 0
38891- 0 0 0 0 0 0 0 0 0 0 0 0
38892- 0 0 1 0 0 1 0 0 1 0 0 0
38893- 0 0 0 0 0 0 0 0 0 0 0 0
38894- 0 0 0 0 0 0 0 0 0 0 0 0
38895- 0 0 0 0 0 0 0 0 0 0 0 0
38896- 0 0 0 0 0 0 0 0 0 0 0 0
38897- 0 0 0 0 0 0 0 0 0 0 0 0
38898- 6 6 6 18 18 18 42 42 42 82 82 82
38899- 26 26 26 2 2 6 2 2 6 2 2 6
38900- 2 2 6 2 2 6 2 2 6 2 2 6
38901- 2 2 6 2 2 6 2 2 6 14 14 14
38902- 46 46 46 34 34 34 6 6 6 2 2 6
38903- 42 42 42 78 78 78 42 42 42 18 18 18
38904- 6 6 6 0 0 0 0 0 0 0 0 0
38905- 0 0 0 0 0 0 0 0 0 0 0 0
38906- 0 0 0 0 0 0 0 0 0 0 0 0
38907- 0 0 0 0 0 0 0 0 0 0 0 0
38908- 0 0 0 0 0 0 0 0 0 0 0 0
38909- 0 0 0 0 0 0 0 0 0 0 0 0
38910- 0 0 0 0 0 0 0 0 0 0 0 0
38911- 0 0 0 0 0 0 0 0 0 0 0 0
38912- 0 0 1 0 0 0 0 0 1 0 0 0
38913- 0 0 0 0 0 0 0 0 0 0 0 0
38914- 0 0 0 0 0 0 0 0 0 0 0 0
38915- 0 0 0 0 0 0 0 0 0 0 0 0
38916- 0 0 0 0 0 0 0 0 0 0 0 0
38917- 0 0 0 0 0 0 0 0 0 0 0 0
38918- 10 10 10 30 30 30 66 66 66 58 58 58
38919- 2 2 6 2 2 6 2 2 6 2 2 6
38920- 2 2 6 2 2 6 2 2 6 2 2 6
38921- 2 2 6 2 2 6 2 2 6 26 26 26
38922- 86 86 86 101 101 101 46 46 46 10 10 10
38923- 2 2 6 58 58 58 70 70 70 34 34 34
38924- 10 10 10 0 0 0 0 0 0 0 0 0
38925- 0 0 0 0 0 0 0 0 0 0 0 0
38926- 0 0 0 0 0 0 0 0 0 0 0 0
38927- 0 0 0 0 0 0 0 0 0 0 0 0
38928- 0 0 0 0 0 0 0 0 0 0 0 0
38929- 0 0 0 0 0 0 0 0 0 0 0 0
38930- 0 0 0 0 0 0 0 0 0 0 0 0
38931- 0 0 0 0 0 0 0 0 0 0 0 0
38932- 0 0 1 0 0 1 0 0 1 0 0 0
38933- 0 0 0 0 0 0 0 0 0 0 0 0
38934- 0 0 0 0 0 0 0 0 0 0 0 0
38935- 0 0 0 0 0 0 0 0 0 0 0 0
38936- 0 0 0 0 0 0 0 0 0 0 0 0
38937- 0 0 0 0 0 0 0 0 0 0 0 0
38938- 14 14 14 42 42 42 86 86 86 10 10 10
38939- 2 2 6 2 2 6 2 2 6 2 2 6
38940- 2 2 6 2 2 6 2 2 6 2 2 6
38941- 2 2 6 2 2 6 2 2 6 30 30 30
38942- 94 94 94 94 94 94 58 58 58 26 26 26
38943- 2 2 6 6 6 6 78 78 78 54 54 54
38944- 22 22 22 6 6 6 0 0 0 0 0 0
38945- 0 0 0 0 0 0 0 0 0 0 0 0
38946- 0 0 0 0 0 0 0 0 0 0 0 0
38947- 0 0 0 0 0 0 0 0 0 0 0 0
38948- 0 0 0 0 0 0 0 0 0 0 0 0
38949- 0 0 0 0 0 0 0 0 0 0 0 0
38950- 0 0 0 0 0 0 0 0 0 0 0 0
38951- 0 0 0 0 0 0 0 0 0 0 0 0
38952- 0 0 0 0 0 0 0 0 0 0 0 0
38953- 0 0 0 0 0 0 0 0 0 0 0 0
38954- 0 0 0 0 0 0 0 0 0 0 0 0
38955- 0 0 0 0 0 0 0 0 0 0 0 0
38956- 0 0 0 0 0 0 0 0 0 0 0 0
38957- 0 0 0 0 0 0 0 0 0 6 6 6
38958- 22 22 22 62 62 62 62 62 62 2 2 6
38959- 2 2 6 2 2 6 2 2 6 2 2 6
38960- 2 2 6 2 2 6 2 2 6 2 2 6
38961- 2 2 6 2 2 6 2 2 6 26 26 26
38962- 54 54 54 38 38 38 18 18 18 10 10 10
38963- 2 2 6 2 2 6 34 34 34 82 82 82
38964- 38 38 38 14 14 14 0 0 0 0 0 0
38965- 0 0 0 0 0 0 0 0 0 0 0 0
38966- 0 0 0 0 0 0 0 0 0 0 0 0
38967- 0 0 0 0 0 0 0 0 0 0 0 0
38968- 0 0 0 0 0 0 0 0 0 0 0 0
38969- 0 0 0 0 0 0 0 0 0 0 0 0
38970- 0 0 0 0 0 0 0 0 0 0 0 0
38971- 0 0 0 0 0 0 0 0 0 0 0 0
38972- 0 0 0 0 0 1 0 0 1 0 0 0
38973- 0 0 0 0 0 0 0 0 0 0 0 0
38974- 0 0 0 0 0 0 0 0 0 0 0 0
38975- 0 0 0 0 0 0 0 0 0 0 0 0
38976- 0 0 0 0 0 0 0 0 0 0 0 0
38977- 0 0 0 0 0 0 0 0 0 6 6 6
38978- 30 30 30 78 78 78 30 30 30 2 2 6
38979- 2 2 6 2 2 6 2 2 6 2 2 6
38980- 2 2 6 2 2 6 2 2 6 2 2 6
38981- 2 2 6 2 2 6 2 2 6 10 10 10
38982- 10 10 10 2 2 6 2 2 6 2 2 6
38983- 2 2 6 2 2 6 2 2 6 78 78 78
38984- 50 50 50 18 18 18 6 6 6 0 0 0
38985- 0 0 0 0 0 0 0 0 0 0 0 0
38986- 0 0 0 0 0 0 0 0 0 0 0 0
38987- 0 0 0 0 0 0 0 0 0 0 0 0
38988- 0 0 0 0 0 0 0 0 0 0 0 0
38989- 0 0 0 0 0 0 0 0 0 0 0 0
38990- 0 0 0 0 0 0 0 0 0 0 0 0
38991- 0 0 0 0 0 0 0 0 0 0 0 0
38992- 0 0 1 0 0 0 0 0 0 0 0 0
38993- 0 0 0 0 0 0 0 0 0 0 0 0
38994- 0 0 0 0 0 0 0 0 0 0 0 0
38995- 0 0 0 0 0 0 0 0 0 0 0 0
38996- 0 0 0 0 0 0 0 0 0 0 0 0
38997- 0 0 0 0 0 0 0 0 0 10 10 10
38998- 38 38 38 86 86 86 14 14 14 2 2 6
38999- 2 2 6 2 2 6 2 2 6 2 2 6
39000- 2 2 6 2 2 6 2 2 6 2 2 6
39001- 2 2 6 2 2 6 2 2 6 2 2 6
39002- 2 2 6 2 2 6 2 2 6 2 2 6
39003- 2 2 6 2 2 6 2 2 6 54 54 54
39004- 66 66 66 26 26 26 6 6 6 0 0 0
39005- 0 0 0 0 0 0 0 0 0 0 0 0
39006- 0 0 0 0 0 0 0 0 0 0 0 0
39007- 0 0 0 0 0 0 0 0 0 0 0 0
39008- 0 0 0 0 0 0 0 0 0 0 0 0
39009- 0 0 0 0 0 0 0 0 0 0 0 0
39010- 0 0 0 0 0 0 0 0 0 0 0 0
39011- 0 0 0 0 0 0 0 0 0 0 0 0
39012- 0 0 0 0 0 1 0 0 1 0 0 0
39013- 0 0 0 0 0 0 0 0 0 0 0 0
39014- 0 0 0 0 0 0 0 0 0 0 0 0
39015- 0 0 0 0 0 0 0 0 0 0 0 0
39016- 0 0 0 0 0 0 0 0 0 0 0 0
39017- 0 0 0 0 0 0 0 0 0 14 14 14
39018- 42 42 42 82 82 82 2 2 6 2 2 6
39019- 2 2 6 6 6 6 10 10 10 2 2 6
39020- 2 2 6 2 2 6 2 2 6 2 2 6
39021- 2 2 6 2 2 6 2 2 6 6 6 6
39022- 14 14 14 10 10 10 2 2 6 2 2 6
39023- 2 2 6 2 2 6 2 2 6 18 18 18
39024- 82 82 82 34 34 34 10 10 10 0 0 0
39025- 0 0 0 0 0 0 0 0 0 0 0 0
39026- 0 0 0 0 0 0 0 0 0 0 0 0
39027- 0 0 0 0 0 0 0 0 0 0 0 0
39028- 0 0 0 0 0 0 0 0 0 0 0 0
39029- 0 0 0 0 0 0 0 0 0 0 0 0
39030- 0 0 0 0 0 0 0 0 0 0 0 0
39031- 0 0 0 0 0 0 0 0 0 0 0 0
39032- 0 0 1 0 0 0 0 0 0 0 0 0
39033- 0 0 0 0 0 0 0 0 0 0 0 0
39034- 0 0 0 0 0 0 0 0 0 0 0 0
39035- 0 0 0 0 0 0 0 0 0 0 0 0
39036- 0 0 0 0 0 0 0 0 0 0 0 0
39037- 0 0 0 0 0 0 0 0 0 14 14 14
39038- 46 46 46 86 86 86 2 2 6 2 2 6
39039- 6 6 6 6 6 6 22 22 22 34 34 34
39040- 6 6 6 2 2 6 2 2 6 2 2 6
39041- 2 2 6 2 2 6 18 18 18 34 34 34
39042- 10 10 10 50 50 50 22 22 22 2 2 6
39043- 2 2 6 2 2 6 2 2 6 10 10 10
39044- 86 86 86 42 42 42 14 14 14 0 0 0
39045- 0 0 0 0 0 0 0 0 0 0 0 0
39046- 0 0 0 0 0 0 0 0 0 0 0 0
39047- 0 0 0 0 0 0 0 0 0 0 0 0
39048- 0 0 0 0 0 0 0 0 0 0 0 0
39049- 0 0 0 0 0 0 0 0 0 0 0 0
39050- 0 0 0 0 0 0 0 0 0 0 0 0
39051- 0 0 0 0 0 0 0 0 0 0 0 0
39052- 0 0 1 0 0 1 0 0 1 0 0 0
39053- 0 0 0 0 0 0 0 0 0 0 0 0
39054- 0 0 0 0 0 0 0 0 0 0 0 0
39055- 0 0 0 0 0 0 0 0 0 0 0 0
39056- 0 0 0 0 0 0 0 0 0 0 0 0
39057- 0 0 0 0 0 0 0 0 0 14 14 14
39058- 46 46 46 86 86 86 2 2 6 2 2 6
39059- 38 38 38 116 116 116 94 94 94 22 22 22
39060- 22 22 22 2 2 6 2 2 6 2 2 6
39061- 14 14 14 86 86 86 138 138 138 162 162 162
39062-154 154 154 38 38 38 26 26 26 6 6 6
39063- 2 2 6 2 2 6 2 2 6 2 2 6
39064- 86 86 86 46 46 46 14 14 14 0 0 0
39065- 0 0 0 0 0 0 0 0 0 0 0 0
39066- 0 0 0 0 0 0 0 0 0 0 0 0
39067- 0 0 0 0 0 0 0 0 0 0 0 0
39068- 0 0 0 0 0 0 0 0 0 0 0 0
39069- 0 0 0 0 0 0 0 0 0 0 0 0
39070- 0 0 0 0 0 0 0 0 0 0 0 0
39071- 0 0 0 0 0 0 0 0 0 0 0 0
39072- 0 0 0 0 0 0 0 0 0 0 0 0
39073- 0 0 0 0 0 0 0 0 0 0 0 0
39074- 0 0 0 0 0 0 0 0 0 0 0 0
39075- 0 0 0 0 0 0 0 0 0 0 0 0
39076- 0 0 0 0 0 0 0 0 0 0 0 0
39077- 0 0 0 0 0 0 0 0 0 14 14 14
39078- 46 46 46 86 86 86 2 2 6 14 14 14
39079-134 134 134 198 198 198 195 195 195 116 116 116
39080- 10 10 10 2 2 6 2 2 6 6 6 6
39081-101 98 89 187 187 187 210 210 210 218 218 218
39082-214 214 214 134 134 134 14 14 14 6 6 6
39083- 2 2 6 2 2 6 2 2 6 2 2 6
39084- 86 86 86 50 50 50 18 18 18 6 6 6
39085- 0 0 0 0 0 0 0 0 0 0 0 0
39086- 0 0 0 0 0 0 0 0 0 0 0 0
39087- 0 0 0 0 0 0 0 0 0 0 0 0
39088- 0 0 0 0 0 0 0 0 0 0 0 0
39089- 0 0 0 0 0 0 0 0 0 0 0 0
39090- 0 0 0 0 0 0 0 0 0 0 0 0
39091- 0 0 0 0 0 0 0 0 1 0 0 0
39092- 0 0 1 0 0 1 0 0 1 0 0 0
39093- 0 0 0 0 0 0 0 0 0 0 0 0
39094- 0 0 0 0 0 0 0 0 0 0 0 0
39095- 0 0 0 0 0 0 0 0 0 0 0 0
39096- 0 0 0 0 0 0 0 0 0 0 0 0
39097- 0 0 0 0 0 0 0 0 0 14 14 14
39098- 46 46 46 86 86 86 2 2 6 54 54 54
39099-218 218 218 195 195 195 226 226 226 246 246 246
39100- 58 58 58 2 2 6 2 2 6 30 30 30
39101-210 210 210 253 253 253 174 174 174 123 123 123
39102-221 221 221 234 234 234 74 74 74 2 2 6
39103- 2 2 6 2 2 6 2 2 6 2 2 6
39104- 70 70 70 58 58 58 22 22 22 6 6 6
39105- 0 0 0 0 0 0 0 0 0 0 0 0
39106- 0 0 0 0 0 0 0 0 0 0 0 0
39107- 0 0 0 0 0 0 0 0 0 0 0 0
39108- 0 0 0 0 0 0 0 0 0 0 0 0
39109- 0 0 0 0 0 0 0 0 0 0 0 0
39110- 0 0 0 0 0 0 0 0 0 0 0 0
39111- 0 0 0 0 0 0 0 0 0 0 0 0
39112- 0 0 0 0 0 0 0 0 0 0 0 0
39113- 0 0 0 0 0 0 0 0 0 0 0 0
39114- 0 0 0 0 0 0 0 0 0 0 0 0
39115- 0 0 0 0 0 0 0 0 0 0 0 0
39116- 0 0 0 0 0 0 0 0 0 0 0 0
39117- 0 0 0 0 0 0 0 0 0 14 14 14
39118- 46 46 46 82 82 82 2 2 6 106 106 106
39119-170 170 170 26 26 26 86 86 86 226 226 226
39120-123 123 123 10 10 10 14 14 14 46 46 46
39121-231 231 231 190 190 190 6 6 6 70 70 70
39122- 90 90 90 238 238 238 158 158 158 2 2 6
39123- 2 2 6 2 2 6 2 2 6 2 2 6
39124- 70 70 70 58 58 58 22 22 22 6 6 6
39125- 0 0 0 0 0 0 0 0 0 0 0 0
39126- 0 0 0 0 0 0 0 0 0 0 0 0
39127- 0 0 0 0 0 0 0 0 0 0 0 0
39128- 0 0 0 0 0 0 0 0 0 0 0 0
39129- 0 0 0 0 0 0 0 0 0 0 0 0
39130- 0 0 0 0 0 0 0 0 0 0 0 0
39131- 0 0 0 0 0 0 0 0 1 0 0 0
39132- 0 0 1 0 0 1 0 0 1 0 0 0
39133- 0 0 0 0 0 0 0 0 0 0 0 0
39134- 0 0 0 0 0 0 0 0 0 0 0 0
39135- 0 0 0 0 0 0 0 0 0 0 0 0
39136- 0 0 0 0 0 0 0 0 0 0 0 0
39137- 0 0 0 0 0 0 0 0 0 14 14 14
39138- 42 42 42 86 86 86 6 6 6 116 116 116
39139-106 106 106 6 6 6 70 70 70 149 149 149
39140-128 128 128 18 18 18 38 38 38 54 54 54
39141-221 221 221 106 106 106 2 2 6 14 14 14
39142- 46 46 46 190 190 190 198 198 198 2 2 6
39143- 2 2 6 2 2 6 2 2 6 2 2 6
39144- 74 74 74 62 62 62 22 22 22 6 6 6
39145- 0 0 0 0 0 0 0 0 0 0 0 0
39146- 0 0 0 0 0 0 0 0 0 0 0 0
39147- 0 0 0 0 0 0 0 0 0 0 0 0
39148- 0 0 0 0 0 0 0 0 0 0 0 0
39149- 0 0 0 0 0 0 0 0 0 0 0 0
39150- 0 0 0 0 0 0 0 0 0 0 0 0
39151- 0 0 0 0 0 0 0 0 1 0 0 0
39152- 0 0 1 0 0 0 0 0 1 0 0 0
39153- 0 0 0 0 0 0 0 0 0 0 0 0
39154- 0 0 0 0 0 0 0 0 0 0 0 0
39155- 0 0 0 0 0 0 0 0 0 0 0 0
39156- 0 0 0 0 0 0 0 0 0 0 0 0
39157- 0 0 0 0 0 0 0 0 0 14 14 14
39158- 42 42 42 94 94 94 14 14 14 101 101 101
39159-128 128 128 2 2 6 18 18 18 116 116 116
39160-118 98 46 121 92 8 121 92 8 98 78 10
39161-162 162 162 106 106 106 2 2 6 2 2 6
39162- 2 2 6 195 195 195 195 195 195 6 6 6
39163- 2 2 6 2 2 6 2 2 6 2 2 6
39164- 74 74 74 62 62 62 22 22 22 6 6 6
39165- 0 0 0 0 0 0 0 0 0 0 0 0
39166- 0 0 0 0 0 0 0 0 0 0 0 0
39167- 0 0 0 0 0 0 0 0 0 0 0 0
39168- 0 0 0 0 0 0 0 0 0 0 0 0
39169- 0 0 0 0 0 0 0 0 0 0 0 0
39170- 0 0 0 0 0 0 0 0 0 0 0 0
39171- 0 0 0 0 0 0 0 0 1 0 0 1
39172- 0 0 1 0 0 0 0 0 1 0 0 0
39173- 0 0 0 0 0 0 0 0 0 0 0 0
39174- 0 0 0 0 0 0 0 0 0 0 0 0
39175- 0 0 0 0 0 0 0 0 0 0 0 0
39176- 0 0 0 0 0 0 0 0 0 0 0 0
39177- 0 0 0 0 0 0 0 0 0 10 10 10
39178- 38 38 38 90 90 90 14 14 14 58 58 58
39179-210 210 210 26 26 26 54 38 6 154 114 10
39180-226 170 11 236 186 11 225 175 15 184 144 12
39181-215 174 15 175 146 61 37 26 9 2 2 6
39182- 70 70 70 246 246 246 138 138 138 2 2 6
39183- 2 2 6 2 2 6 2 2 6 2 2 6
39184- 70 70 70 66 66 66 26 26 26 6 6 6
39185- 0 0 0 0 0 0 0 0 0 0 0 0
39186- 0 0 0 0 0 0 0 0 0 0 0 0
39187- 0 0 0 0 0 0 0 0 0 0 0 0
39188- 0 0 0 0 0 0 0 0 0 0 0 0
39189- 0 0 0 0 0 0 0 0 0 0 0 0
39190- 0 0 0 0 0 0 0 0 0 0 0 0
39191- 0 0 0 0 0 0 0 0 0 0 0 0
39192- 0 0 0 0 0 0 0 0 0 0 0 0
39193- 0 0 0 0 0 0 0 0 0 0 0 0
39194- 0 0 0 0 0 0 0 0 0 0 0 0
39195- 0 0 0 0 0 0 0 0 0 0 0 0
39196- 0 0 0 0 0 0 0 0 0 0 0 0
39197- 0 0 0 0 0 0 0 0 0 10 10 10
39198- 38 38 38 86 86 86 14 14 14 10 10 10
39199-195 195 195 188 164 115 192 133 9 225 175 15
39200-239 182 13 234 190 10 232 195 16 232 200 30
39201-245 207 45 241 208 19 232 195 16 184 144 12
39202-218 194 134 211 206 186 42 42 42 2 2 6
39203- 2 2 6 2 2 6 2 2 6 2 2 6
39204- 50 50 50 74 74 74 30 30 30 6 6 6
39205- 0 0 0 0 0 0 0 0 0 0 0 0
39206- 0 0 0 0 0 0 0 0 0 0 0 0
39207- 0 0 0 0 0 0 0 0 0 0 0 0
39208- 0 0 0 0 0 0 0 0 0 0 0 0
39209- 0 0 0 0 0 0 0 0 0 0 0 0
39210- 0 0 0 0 0 0 0 0 0 0 0 0
39211- 0 0 0 0 0 0 0 0 0 0 0 0
39212- 0 0 0 0 0 0 0 0 0 0 0 0
39213- 0 0 0 0 0 0 0 0 0 0 0 0
39214- 0 0 0 0 0 0 0 0 0 0 0 0
39215- 0 0 0 0 0 0 0 0 0 0 0 0
39216- 0 0 0 0 0 0 0 0 0 0 0 0
39217- 0 0 0 0 0 0 0 0 0 10 10 10
39218- 34 34 34 86 86 86 14 14 14 2 2 6
39219-121 87 25 192 133 9 219 162 10 239 182 13
39220-236 186 11 232 195 16 241 208 19 244 214 54
39221-246 218 60 246 218 38 246 215 20 241 208 19
39222-241 208 19 226 184 13 121 87 25 2 2 6
39223- 2 2 6 2 2 6 2 2 6 2 2 6
39224- 50 50 50 82 82 82 34 34 34 10 10 10
39225- 0 0 0 0 0 0 0 0 0 0 0 0
39226- 0 0 0 0 0 0 0 0 0 0 0 0
39227- 0 0 0 0 0 0 0 0 0 0 0 0
39228- 0 0 0 0 0 0 0 0 0 0 0 0
39229- 0 0 0 0 0 0 0 0 0 0 0 0
39230- 0 0 0 0 0 0 0 0 0 0 0 0
39231- 0 0 0 0 0 0 0 0 0 0 0 0
39232- 0 0 0 0 0 0 0 0 0 0 0 0
39233- 0 0 0 0 0 0 0 0 0 0 0 0
39234- 0 0 0 0 0 0 0 0 0 0 0 0
39235- 0 0 0 0 0 0 0 0 0 0 0 0
39236- 0 0 0 0 0 0 0 0 0 0 0 0
39237- 0 0 0 0 0 0 0 0 0 10 10 10
39238- 34 34 34 82 82 82 30 30 30 61 42 6
39239-180 123 7 206 145 10 230 174 11 239 182 13
39240-234 190 10 238 202 15 241 208 19 246 218 74
39241-246 218 38 246 215 20 246 215 20 246 215 20
39242-226 184 13 215 174 15 184 144 12 6 6 6
39243- 2 2 6 2 2 6 2 2 6 2 2 6
39244- 26 26 26 94 94 94 42 42 42 14 14 14
39245- 0 0 0 0 0 0 0 0 0 0 0 0
39246- 0 0 0 0 0 0 0 0 0 0 0 0
39247- 0 0 0 0 0 0 0 0 0 0 0 0
39248- 0 0 0 0 0 0 0 0 0 0 0 0
39249- 0 0 0 0 0 0 0 0 0 0 0 0
39250- 0 0 0 0 0 0 0 0 0 0 0 0
39251- 0 0 0 0 0 0 0 0 0 0 0 0
39252- 0 0 0 0 0 0 0 0 0 0 0 0
39253- 0 0 0 0 0 0 0 0 0 0 0 0
39254- 0 0 0 0 0 0 0 0 0 0 0 0
39255- 0 0 0 0 0 0 0 0 0 0 0 0
39256- 0 0 0 0 0 0 0 0 0 0 0 0
39257- 0 0 0 0 0 0 0 0 0 10 10 10
39258- 30 30 30 78 78 78 50 50 50 104 69 6
39259-192 133 9 216 158 10 236 178 12 236 186 11
39260-232 195 16 241 208 19 244 214 54 245 215 43
39261-246 215 20 246 215 20 241 208 19 198 155 10
39262-200 144 11 216 158 10 156 118 10 2 2 6
39263- 2 2 6 2 2 6 2 2 6 2 2 6
39264- 6 6 6 90 90 90 54 54 54 18 18 18
39265- 6 6 6 0 0 0 0 0 0 0 0 0
39266- 0 0 0 0 0 0 0 0 0 0 0 0
39267- 0 0 0 0 0 0 0 0 0 0 0 0
39268- 0 0 0 0 0 0 0 0 0 0 0 0
39269- 0 0 0 0 0 0 0 0 0 0 0 0
39270- 0 0 0 0 0 0 0 0 0 0 0 0
39271- 0 0 0 0 0 0 0 0 0 0 0 0
39272- 0 0 0 0 0 0 0 0 0 0 0 0
39273- 0 0 0 0 0 0 0 0 0 0 0 0
39274- 0 0 0 0 0 0 0 0 0 0 0 0
39275- 0 0 0 0 0 0 0 0 0 0 0 0
39276- 0 0 0 0 0 0 0 0 0 0 0 0
39277- 0 0 0 0 0 0 0 0 0 10 10 10
39278- 30 30 30 78 78 78 46 46 46 22 22 22
39279-137 92 6 210 162 10 239 182 13 238 190 10
39280-238 202 15 241 208 19 246 215 20 246 215 20
39281-241 208 19 203 166 17 185 133 11 210 150 10
39282-216 158 10 210 150 10 102 78 10 2 2 6
39283- 6 6 6 54 54 54 14 14 14 2 2 6
39284- 2 2 6 62 62 62 74 74 74 30 30 30
39285- 10 10 10 0 0 0 0 0 0 0 0 0
39286- 0 0 0 0 0 0 0 0 0 0 0 0
39287- 0 0 0 0 0 0 0 0 0 0 0 0
39288- 0 0 0 0 0 0 0 0 0 0 0 0
39289- 0 0 0 0 0 0 0 0 0 0 0 0
39290- 0 0 0 0 0 0 0 0 0 0 0 0
39291- 0 0 0 0 0 0 0 0 0 0 0 0
39292- 0 0 0 0 0 0 0 0 0 0 0 0
39293- 0 0 0 0 0 0 0 0 0 0 0 0
39294- 0 0 0 0 0 0 0 0 0 0 0 0
39295- 0 0 0 0 0 0 0 0 0 0 0 0
39296- 0 0 0 0 0 0 0 0 0 0 0 0
39297- 0 0 0 0 0 0 0 0 0 10 10 10
39298- 34 34 34 78 78 78 50 50 50 6 6 6
39299- 94 70 30 139 102 15 190 146 13 226 184 13
39300-232 200 30 232 195 16 215 174 15 190 146 13
39301-168 122 10 192 133 9 210 150 10 213 154 11
39302-202 150 34 182 157 106 101 98 89 2 2 6
39303- 2 2 6 78 78 78 116 116 116 58 58 58
39304- 2 2 6 22 22 22 90 90 90 46 46 46
39305- 18 18 18 6 6 6 0 0 0 0 0 0
39306- 0 0 0 0 0 0 0 0 0 0 0 0
39307- 0 0 0 0 0 0 0 0 0 0 0 0
39308- 0 0 0 0 0 0 0 0 0 0 0 0
39309- 0 0 0 0 0 0 0 0 0 0 0 0
39310- 0 0 0 0 0 0 0 0 0 0 0 0
39311- 0 0 0 0 0 0 0 0 0 0 0 0
39312- 0 0 0 0 0 0 0 0 0 0 0 0
39313- 0 0 0 0 0 0 0 0 0 0 0 0
39314- 0 0 0 0 0 0 0 0 0 0 0 0
39315- 0 0 0 0 0 0 0 0 0 0 0 0
39316- 0 0 0 0 0 0 0 0 0 0 0 0
39317- 0 0 0 0 0 0 0 0 0 10 10 10
39318- 38 38 38 86 86 86 50 50 50 6 6 6
39319-128 128 128 174 154 114 156 107 11 168 122 10
39320-198 155 10 184 144 12 197 138 11 200 144 11
39321-206 145 10 206 145 10 197 138 11 188 164 115
39322-195 195 195 198 198 198 174 174 174 14 14 14
39323- 2 2 6 22 22 22 116 116 116 116 116 116
39324- 22 22 22 2 2 6 74 74 74 70 70 70
39325- 30 30 30 10 10 10 0 0 0 0 0 0
39326- 0 0 0 0 0 0 0 0 0 0 0 0
39327- 0 0 0 0 0 0 0 0 0 0 0 0
39328- 0 0 0 0 0 0 0 0 0 0 0 0
39329- 0 0 0 0 0 0 0 0 0 0 0 0
39330- 0 0 0 0 0 0 0 0 0 0 0 0
39331- 0 0 0 0 0 0 0 0 0 0 0 0
39332- 0 0 0 0 0 0 0 0 0 0 0 0
39333- 0 0 0 0 0 0 0 0 0 0 0 0
39334- 0 0 0 0 0 0 0 0 0 0 0 0
39335- 0 0 0 0 0 0 0 0 0 0 0 0
39336- 0 0 0 0 0 0 0 0 0 0 0 0
39337- 0 0 0 0 0 0 6 6 6 18 18 18
39338- 50 50 50 101 101 101 26 26 26 10 10 10
39339-138 138 138 190 190 190 174 154 114 156 107 11
39340-197 138 11 200 144 11 197 138 11 192 133 9
39341-180 123 7 190 142 34 190 178 144 187 187 187
39342-202 202 202 221 221 221 214 214 214 66 66 66
39343- 2 2 6 2 2 6 50 50 50 62 62 62
39344- 6 6 6 2 2 6 10 10 10 90 90 90
39345- 50 50 50 18 18 18 6 6 6 0 0 0
39346- 0 0 0 0 0 0 0 0 0 0 0 0
39347- 0 0 0 0 0 0 0 0 0 0 0 0
39348- 0 0 0 0 0 0 0 0 0 0 0 0
39349- 0 0 0 0 0 0 0 0 0 0 0 0
39350- 0 0 0 0 0 0 0 0 0 0 0 0
39351- 0 0 0 0 0 0 0 0 0 0 0 0
39352- 0 0 0 0 0 0 0 0 0 0 0 0
39353- 0 0 0 0 0 0 0 0 0 0 0 0
39354- 0 0 0 0 0 0 0 0 0 0 0 0
39355- 0 0 0 0 0 0 0 0 0 0 0 0
39356- 0 0 0 0 0 0 0 0 0 0 0 0
39357- 0 0 0 0 0 0 10 10 10 34 34 34
39358- 74 74 74 74 74 74 2 2 6 6 6 6
39359-144 144 144 198 198 198 190 190 190 178 166 146
39360-154 121 60 156 107 11 156 107 11 168 124 44
39361-174 154 114 187 187 187 190 190 190 210 210 210
39362-246 246 246 253 253 253 253 253 253 182 182 182
39363- 6 6 6 2 2 6 2 2 6 2 2 6
39364- 2 2 6 2 2 6 2 2 6 62 62 62
39365- 74 74 74 34 34 34 14 14 14 0 0 0
39366- 0 0 0 0 0 0 0 0 0 0 0 0
39367- 0 0 0 0 0 0 0 0 0 0 0 0
39368- 0 0 0 0 0 0 0 0 0 0 0 0
39369- 0 0 0 0 0 0 0 0 0 0 0 0
39370- 0 0 0 0 0 0 0 0 0 0 0 0
39371- 0 0 0 0 0 0 0 0 0 0 0 0
39372- 0 0 0 0 0 0 0 0 0 0 0 0
39373- 0 0 0 0 0 0 0 0 0 0 0 0
39374- 0 0 0 0 0 0 0 0 0 0 0 0
39375- 0 0 0 0 0 0 0 0 0 0 0 0
39376- 0 0 0 0 0 0 0 0 0 0 0 0
39377- 0 0 0 10 10 10 22 22 22 54 54 54
39378- 94 94 94 18 18 18 2 2 6 46 46 46
39379-234 234 234 221 221 221 190 190 190 190 190 190
39380-190 190 190 187 187 187 187 187 187 190 190 190
39381-190 190 190 195 195 195 214 214 214 242 242 242
39382-253 253 253 253 253 253 253 253 253 253 253 253
39383- 82 82 82 2 2 6 2 2 6 2 2 6
39384- 2 2 6 2 2 6 2 2 6 14 14 14
39385- 86 86 86 54 54 54 22 22 22 6 6 6
39386- 0 0 0 0 0 0 0 0 0 0 0 0
39387- 0 0 0 0 0 0 0 0 0 0 0 0
39388- 0 0 0 0 0 0 0 0 0 0 0 0
39389- 0 0 0 0 0 0 0 0 0 0 0 0
39390- 0 0 0 0 0 0 0 0 0 0 0 0
39391- 0 0 0 0 0 0 0 0 0 0 0 0
39392- 0 0 0 0 0 0 0 0 0 0 0 0
39393- 0 0 0 0 0 0 0 0 0 0 0 0
39394- 0 0 0 0 0 0 0 0 0 0 0 0
39395- 0 0 0 0 0 0 0 0 0 0 0 0
39396- 0 0 0 0 0 0 0 0 0 0 0 0
39397- 6 6 6 18 18 18 46 46 46 90 90 90
39398- 46 46 46 18 18 18 6 6 6 182 182 182
39399-253 253 253 246 246 246 206 206 206 190 190 190
39400-190 190 190 190 190 190 190 190 190 190 190 190
39401-206 206 206 231 231 231 250 250 250 253 253 253
39402-253 253 253 253 253 253 253 253 253 253 253 253
39403-202 202 202 14 14 14 2 2 6 2 2 6
39404- 2 2 6 2 2 6 2 2 6 2 2 6
39405- 42 42 42 86 86 86 42 42 42 18 18 18
39406- 6 6 6 0 0 0 0 0 0 0 0 0
39407- 0 0 0 0 0 0 0 0 0 0 0 0
39408- 0 0 0 0 0 0 0 0 0 0 0 0
39409- 0 0 0 0 0 0 0 0 0 0 0 0
39410- 0 0 0 0 0 0 0 0 0 0 0 0
39411- 0 0 0 0 0 0 0 0 0 0 0 0
39412- 0 0 0 0 0 0 0 0 0 0 0 0
39413- 0 0 0 0 0 0 0 0 0 0 0 0
39414- 0 0 0 0 0 0 0 0 0 0 0 0
39415- 0 0 0 0 0 0 0 0 0 0 0 0
39416- 0 0 0 0 0 0 0 0 0 6 6 6
39417- 14 14 14 38 38 38 74 74 74 66 66 66
39418- 2 2 6 6 6 6 90 90 90 250 250 250
39419-253 253 253 253 253 253 238 238 238 198 198 198
39420-190 190 190 190 190 190 195 195 195 221 221 221
39421-246 246 246 253 253 253 253 253 253 253 253 253
39422-253 253 253 253 253 253 253 253 253 253 253 253
39423-253 253 253 82 82 82 2 2 6 2 2 6
39424- 2 2 6 2 2 6 2 2 6 2 2 6
39425- 2 2 6 78 78 78 70 70 70 34 34 34
39426- 14 14 14 6 6 6 0 0 0 0 0 0
39427- 0 0 0 0 0 0 0 0 0 0 0 0
39428- 0 0 0 0 0 0 0 0 0 0 0 0
39429- 0 0 0 0 0 0 0 0 0 0 0 0
39430- 0 0 0 0 0 0 0 0 0 0 0 0
39431- 0 0 0 0 0 0 0 0 0 0 0 0
39432- 0 0 0 0 0 0 0 0 0 0 0 0
39433- 0 0 0 0 0 0 0 0 0 0 0 0
39434- 0 0 0 0 0 0 0 0 0 0 0 0
39435- 0 0 0 0 0 0 0 0 0 0 0 0
39436- 0 0 0 0 0 0 0 0 0 14 14 14
39437- 34 34 34 66 66 66 78 78 78 6 6 6
39438- 2 2 6 18 18 18 218 218 218 253 253 253
39439-253 253 253 253 253 253 253 253 253 246 246 246
39440-226 226 226 231 231 231 246 246 246 253 253 253
39441-253 253 253 253 253 253 253 253 253 253 253 253
39442-253 253 253 253 253 253 253 253 253 253 253 253
39443-253 253 253 178 178 178 2 2 6 2 2 6
39444- 2 2 6 2 2 6 2 2 6 2 2 6
39445- 2 2 6 18 18 18 90 90 90 62 62 62
39446- 30 30 30 10 10 10 0 0 0 0 0 0
39447- 0 0 0 0 0 0 0 0 0 0 0 0
39448- 0 0 0 0 0 0 0 0 0 0 0 0
39449- 0 0 0 0 0 0 0 0 0 0 0 0
39450- 0 0 0 0 0 0 0 0 0 0 0 0
39451- 0 0 0 0 0 0 0 0 0 0 0 0
39452- 0 0 0 0 0 0 0 0 0 0 0 0
39453- 0 0 0 0 0 0 0 0 0 0 0 0
39454- 0 0 0 0 0 0 0 0 0 0 0 0
39455- 0 0 0 0 0 0 0 0 0 0 0 0
39456- 0 0 0 0 0 0 10 10 10 26 26 26
39457- 58 58 58 90 90 90 18 18 18 2 2 6
39458- 2 2 6 110 110 110 253 253 253 253 253 253
39459-253 253 253 253 253 253 253 253 253 253 253 253
39460-250 250 250 253 253 253 253 253 253 253 253 253
39461-253 253 253 253 253 253 253 253 253 253 253 253
39462-253 253 253 253 253 253 253 253 253 253 253 253
39463-253 253 253 231 231 231 18 18 18 2 2 6
39464- 2 2 6 2 2 6 2 2 6 2 2 6
39465- 2 2 6 2 2 6 18 18 18 94 94 94
39466- 54 54 54 26 26 26 10 10 10 0 0 0
39467- 0 0 0 0 0 0 0 0 0 0 0 0
39468- 0 0 0 0 0 0 0 0 0 0 0 0
39469- 0 0 0 0 0 0 0 0 0 0 0 0
39470- 0 0 0 0 0 0 0 0 0 0 0 0
39471- 0 0 0 0 0 0 0 0 0 0 0 0
39472- 0 0 0 0 0 0 0 0 0 0 0 0
39473- 0 0 0 0 0 0 0 0 0 0 0 0
39474- 0 0 0 0 0 0 0 0 0 0 0 0
39475- 0 0 0 0 0 0 0 0 0 0 0 0
39476- 0 0 0 6 6 6 22 22 22 50 50 50
39477- 90 90 90 26 26 26 2 2 6 2 2 6
39478- 14 14 14 195 195 195 250 250 250 253 253 253
39479-253 253 253 253 253 253 253 253 253 253 253 253
39480-253 253 253 253 253 253 253 253 253 253 253 253
39481-253 253 253 253 253 253 253 253 253 253 253 253
39482-253 253 253 253 253 253 253 253 253 253 253 253
39483-250 250 250 242 242 242 54 54 54 2 2 6
39484- 2 2 6 2 2 6 2 2 6 2 2 6
39485- 2 2 6 2 2 6 2 2 6 38 38 38
39486- 86 86 86 50 50 50 22 22 22 6 6 6
39487- 0 0 0 0 0 0 0 0 0 0 0 0
39488- 0 0 0 0 0 0 0 0 0 0 0 0
39489- 0 0 0 0 0 0 0 0 0 0 0 0
39490- 0 0 0 0 0 0 0 0 0 0 0 0
39491- 0 0 0 0 0 0 0 0 0 0 0 0
39492- 0 0 0 0 0 0 0 0 0 0 0 0
39493- 0 0 0 0 0 0 0 0 0 0 0 0
39494- 0 0 0 0 0 0 0 0 0 0 0 0
39495- 0 0 0 0 0 0 0 0 0 0 0 0
39496- 6 6 6 14 14 14 38 38 38 82 82 82
39497- 34 34 34 2 2 6 2 2 6 2 2 6
39498- 42 42 42 195 195 195 246 246 246 253 253 253
39499-253 253 253 253 253 253 253 253 253 250 250 250
39500-242 242 242 242 242 242 250 250 250 253 253 253
39501-253 253 253 253 253 253 253 253 253 253 253 253
39502-253 253 253 250 250 250 246 246 246 238 238 238
39503-226 226 226 231 231 231 101 101 101 6 6 6
39504- 2 2 6 2 2 6 2 2 6 2 2 6
39505- 2 2 6 2 2 6 2 2 6 2 2 6
39506- 38 38 38 82 82 82 42 42 42 14 14 14
39507- 6 6 6 0 0 0 0 0 0 0 0 0
39508- 0 0 0 0 0 0 0 0 0 0 0 0
39509- 0 0 0 0 0 0 0 0 0 0 0 0
39510- 0 0 0 0 0 0 0 0 0 0 0 0
39511- 0 0 0 0 0 0 0 0 0 0 0 0
39512- 0 0 0 0 0 0 0 0 0 0 0 0
39513- 0 0 0 0 0 0 0 0 0 0 0 0
39514- 0 0 0 0 0 0 0 0 0 0 0 0
39515- 0 0 0 0 0 0 0 0 0 0 0 0
39516- 10 10 10 26 26 26 62 62 62 66 66 66
39517- 2 2 6 2 2 6 2 2 6 6 6 6
39518- 70 70 70 170 170 170 206 206 206 234 234 234
39519-246 246 246 250 250 250 250 250 250 238 238 238
39520-226 226 226 231 231 231 238 238 238 250 250 250
39521-250 250 250 250 250 250 246 246 246 231 231 231
39522-214 214 214 206 206 206 202 202 202 202 202 202
39523-198 198 198 202 202 202 182 182 182 18 18 18
39524- 2 2 6 2 2 6 2 2 6 2 2 6
39525- 2 2 6 2 2 6 2 2 6 2 2 6
39526- 2 2 6 62 62 62 66 66 66 30 30 30
39527- 10 10 10 0 0 0 0 0 0 0 0 0
39528- 0 0 0 0 0 0 0 0 0 0 0 0
39529- 0 0 0 0 0 0 0 0 0 0 0 0
39530- 0 0 0 0 0 0 0 0 0 0 0 0
39531- 0 0 0 0 0 0 0 0 0 0 0 0
39532- 0 0 0 0 0 0 0 0 0 0 0 0
39533- 0 0 0 0 0 0 0 0 0 0 0 0
39534- 0 0 0 0 0 0 0 0 0 0 0 0
39535- 0 0 0 0 0 0 0 0 0 0 0 0
39536- 14 14 14 42 42 42 82 82 82 18 18 18
39537- 2 2 6 2 2 6 2 2 6 10 10 10
39538- 94 94 94 182 182 182 218 218 218 242 242 242
39539-250 250 250 253 253 253 253 253 253 250 250 250
39540-234 234 234 253 253 253 253 253 253 253 253 253
39541-253 253 253 253 253 253 253 253 253 246 246 246
39542-238 238 238 226 226 226 210 210 210 202 202 202
39543-195 195 195 195 195 195 210 210 210 158 158 158
39544- 6 6 6 14 14 14 50 50 50 14 14 14
39545- 2 2 6 2 2 6 2 2 6 2 2 6
39546- 2 2 6 6 6 6 86 86 86 46 46 46
39547- 18 18 18 6 6 6 0 0 0 0 0 0
39548- 0 0 0 0 0 0 0 0 0 0 0 0
39549- 0 0 0 0 0 0 0 0 0 0 0 0
39550- 0 0 0 0 0 0 0 0 0 0 0 0
39551- 0 0 0 0 0 0 0 0 0 0 0 0
39552- 0 0 0 0 0 0 0 0 0 0 0 0
39553- 0 0 0 0 0 0 0 0 0 0 0 0
39554- 0 0 0 0 0 0 0 0 0 0 0 0
39555- 0 0 0 0 0 0 0 0 0 6 6 6
39556- 22 22 22 54 54 54 70 70 70 2 2 6
39557- 2 2 6 10 10 10 2 2 6 22 22 22
39558-166 166 166 231 231 231 250 250 250 253 253 253
39559-253 253 253 253 253 253 253 253 253 250 250 250
39560-242 242 242 253 253 253 253 253 253 253 253 253
39561-253 253 253 253 253 253 253 253 253 253 253 253
39562-253 253 253 253 253 253 253 253 253 246 246 246
39563-231 231 231 206 206 206 198 198 198 226 226 226
39564- 94 94 94 2 2 6 6 6 6 38 38 38
39565- 30 30 30 2 2 6 2 2 6 2 2 6
39566- 2 2 6 2 2 6 62 62 62 66 66 66
39567- 26 26 26 10 10 10 0 0 0 0 0 0
39568- 0 0 0 0 0 0 0 0 0 0 0 0
39569- 0 0 0 0 0 0 0 0 0 0 0 0
39570- 0 0 0 0 0 0 0 0 0 0 0 0
39571- 0 0 0 0 0 0 0 0 0 0 0 0
39572- 0 0 0 0 0 0 0 0 0 0 0 0
39573- 0 0 0 0 0 0 0 0 0 0 0 0
39574- 0 0 0 0 0 0 0 0 0 0 0 0
39575- 0 0 0 0 0 0 0 0 0 10 10 10
39576- 30 30 30 74 74 74 50 50 50 2 2 6
39577- 26 26 26 26 26 26 2 2 6 106 106 106
39578-238 238 238 253 253 253 253 253 253 253 253 253
39579-253 253 253 253 253 253 253 253 253 253 253 253
39580-253 253 253 253 253 253 253 253 253 253 253 253
39581-253 253 253 253 253 253 253 253 253 253 253 253
39582-253 253 253 253 253 253 253 253 253 253 253 253
39583-253 253 253 246 246 246 218 218 218 202 202 202
39584-210 210 210 14 14 14 2 2 6 2 2 6
39585- 30 30 30 22 22 22 2 2 6 2 2 6
39586- 2 2 6 2 2 6 18 18 18 86 86 86
39587- 42 42 42 14 14 14 0 0 0 0 0 0
39588- 0 0 0 0 0 0 0 0 0 0 0 0
39589- 0 0 0 0 0 0 0 0 0 0 0 0
39590- 0 0 0 0 0 0 0 0 0 0 0 0
39591- 0 0 0 0 0 0 0 0 0 0 0 0
39592- 0 0 0 0 0 0 0 0 0 0 0 0
39593- 0 0 0 0 0 0 0 0 0 0 0 0
39594- 0 0 0 0 0 0 0 0 0 0 0 0
39595- 0 0 0 0 0 0 0 0 0 14 14 14
39596- 42 42 42 90 90 90 22 22 22 2 2 6
39597- 42 42 42 2 2 6 18 18 18 218 218 218
39598-253 253 253 253 253 253 253 253 253 253 253 253
39599-253 253 253 253 253 253 253 253 253 253 253 253
39600-253 253 253 253 253 253 253 253 253 253 253 253
39601-253 253 253 253 253 253 253 253 253 253 253 253
39602-253 253 253 253 253 253 253 253 253 253 253 253
39603-253 253 253 253 253 253 250 250 250 221 221 221
39604-218 218 218 101 101 101 2 2 6 14 14 14
39605- 18 18 18 38 38 38 10 10 10 2 2 6
39606- 2 2 6 2 2 6 2 2 6 78 78 78
39607- 58 58 58 22 22 22 6 6 6 0 0 0
39608- 0 0 0 0 0 0 0 0 0 0 0 0
39609- 0 0 0 0 0 0 0 0 0 0 0 0
39610- 0 0 0 0 0 0 0 0 0 0 0 0
39611- 0 0 0 0 0 0 0 0 0 0 0 0
39612- 0 0 0 0 0 0 0 0 0 0 0 0
39613- 0 0 0 0 0 0 0 0 0 0 0 0
39614- 0 0 0 0 0 0 0 0 0 0 0 0
39615- 0 0 0 0 0 0 6 6 6 18 18 18
39616- 54 54 54 82 82 82 2 2 6 26 26 26
39617- 22 22 22 2 2 6 123 123 123 253 253 253
39618-253 253 253 253 253 253 253 253 253 253 253 253
39619-253 253 253 253 253 253 253 253 253 253 253 253
39620-253 253 253 253 253 253 253 253 253 253 253 253
39621-253 253 253 253 253 253 253 253 253 253 253 253
39622-253 253 253 253 253 253 253 253 253 253 253 253
39623-253 253 253 253 253 253 253 253 253 250 250 250
39624-238 238 238 198 198 198 6 6 6 38 38 38
39625- 58 58 58 26 26 26 38 38 38 2 2 6
39626- 2 2 6 2 2 6 2 2 6 46 46 46
39627- 78 78 78 30 30 30 10 10 10 0 0 0
39628- 0 0 0 0 0 0 0 0 0 0 0 0
39629- 0 0 0 0 0 0 0 0 0 0 0 0
39630- 0 0 0 0 0 0 0 0 0 0 0 0
39631- 0 0 0 0 0 0 0 0 0 0 0 0
39632- 0 0 0 0 0 0 0 0 0 0 0 0
39633- 0 0 0 0 0 0 0 0 0 0 0 0
39634- 0 0 0 0 0 0 0 0 0 0 0 0
39635- 0 0 0 0 0 0 10 10 10 30 30 30
39636- 74 74 74 58 58 58 2 2 6 42 42 42
39637- 2 2 6 22 22 22 231 231 231 253 253 253
39638-253 253 253 253 253 253 253 253 253 253 253 253
39639-253 253 253 253 253 253 253 253 253 250 250 250
39640-253 253 253 253 253 253 253 253 253 253 253 253
39641-253 253 253 253 253 253 253 253 253 253 253 253
39642-253 253 253 253 253 253 253 253 253 253 253 253
39643-253 253 253 253 253 253 253 253 253 253 253 253
39644-253 253 253 246 246 246 46 46 46 38 38 38
39645- 42 42 42 14 14 14 38 38 38 14 14 14
39646- 2 2 6 2 2 6 2 2 6 6 6 6
39647- 86 86 86 46 46 46 14 14 14 0 0 0
39648- 0 0 0 0 0 0 0 0 0 0 0 0
39649- 0 0 0 0 0 0 0 0 0 0 0 0
39650- 0 0 0 0 0 0 0 0 0 0 0 0
39651- 0 0 0 0 0 0 0 0 0 0 0 0
39652- 0 0 0 0 0 0 0 0 0 0 0 0
39653- 0 0 0 0 0 0 0 0 0 0 0 0
39654- 0 0 0 0 0 0 0 0 0 0 0 0
39655- 0 0 0 6 6 6 14 14 14 42 42 42
39656- 90 90 90 18 18 18 18 18 18 26 26 26
39657- 2 2 6 116 116 116 253 253 253 253 253 253
39658-253 253 253 253 253 253 253 253 253 253 253 253
39659-253 253 253 253 253 253 250 250 250 238 238 238
39660-253 253 253 253 253 253 253 253 253 253 253 253
39661-253 253 253 253 253 253 253 253 253 253 253 253
39662-253 253 253 253 253 253 253 253 253 253 253 253
39663-253 253 253 253 253 253 253 253 253 253 253 253
39664-253 253 253 253 253 253 94 94 94 6 6 6
39665- 2 2 6 2 2 6 10 10 10 34 34 34
39666- 2 2 6 2 2 6 2 2 6 2 2 6
39667- 74 74 74 58 58 58 22 22 22 6 6 6
39668- 0 0 0 0 0 0 0 0 0 0 0 0
39669- 0 0 0 0 0 0 0 0 0 0 0 0
39670- 0 0 0 0 0 0 0 0 0 0 0 0
39671- 0 0 0 0 0 0 0 0 0 0 0 0
39672- 0 0 0 0 0 0 0 0 0 0 0 0
39673- 0 0 0 0 0 0 0 0 0 0 0 0
39674- 0 0 0 0 0 0 0 0 0 0 0 0
39675- 0 0 0 10 10 10 26 26 26 66 66 66
39676- 82 82 82 2 2 6 38 38 38 6 6 6
39677- 14 14 14 210 210 210 253 253 253 253 253 253
39678-253 253 253 253 253 253 253 253 253 253 253 253
39679-253 253 253 253 253 253 246 246 246 242 242 242
39680-253 253 253 253 253 253 253 253 253 253 253 253
39681-253 253 253 253 253 253 253 253 253 253 253 253
39682-253 253 253 253 253 253 253 253 253 253 253 253
39683-253 253 253 253 253 253 253 253 253 253 253 253
39684-253 253 253 253 253 253 144 144 144 2 2 6
39685- 2 2 6 2 2 6 2 2 6 46 46 46
39686- 2 2 6 2 2 6 2 2 6 2 2 6
39687- 42 42 42 74 74 74 30 30 30 10 10 10
39688- 0 0 0 0 0 0 0 0 0 0 0 0
39689- 0 0 0 0 0 0 0 0 0 0 0 0
39690- 0 0 0 0 0 0 0 0 0 0 0 0
39691- 0 0 0 0 0 0 0 0 0 0 0 0
39692- 0 0 0 0 0 0 0 0 0 0 0 0
39693- 0 0 0 0 0 0 0 0 0 0 0 0
39694- 0 0 0 0 0 0 0 0 0 0 0 0
39695- 6 6 6 14 14 14 42 42 42 90 90 90
39696- 26 26 26 6 6 6 42 42 42 2 2 6
39697- 74 74 74 250 250 250 253 253 253 253 253 253
39698-253 253 253 253 253 253 253 253 253 253 253 253
39699-253 253 253 253 253 253 242 242 242 242 242 242
39700-253 253 253 253 253 253 253 253 253 253 253 253
39701-253 253 253 253 253 253 253 253 253 253 253 253
39702-253 253 253 253 253 253 253 253 253 253 253 253
39703-253 253 253 253 253 253 253 253 253 253 253 253
39704-253 253 253 253 253 253 182 182 182 2 2 6
39705- 2 2 6 2 2 6 2 2 6 46 46 46
39706- 2 2 6 2 2 6 2 2 6 2 2 6
39707- 10 10 10 86 86 86 38 38 38 10 10 10
39708- 0 0 0 0 0 0 0 0 0 0 0 0
39709- 0 0 0 0 0 0 0 0 0 0 0 0
39710- 0 0 0 0 0 0 0 0 0 0 0 0
39711- 0 0 0 0 0 0 0 0 0 0 0 0
39712- 0 0 0 0 0 0 0 0 0 0 0 0
39713- 0 0 0 0 0 0 0 0 0 0 0 0
39714- 0 0 0 0 0 0 0 0 0 0 0 0
39715- 10 10 10 26 26 26 66 66 66 82 82 82
39716- 2 2 6 22 22 22 18 18 18 2 2 6
39717-149 149 149 253 253 253 253 253 253 253 253 253
39718-253 253 253 253 253 253 253 253 253 253 253 253
39719-253 253 253 253 253 253 234 234 234 242 242 242
39720-253 253 253 253 253 253 253 253 253 253 253 253
39721-253 253 253 253 253 253 253 253 253 253 253 253
39722-253 253 253 253 253 253 253 253 253 253 253 253
39723-253 253 253 253 253 253 253 253 253 253 253 253
39724-253 253 253 253 253 253 206 206 206 2 2 6
39725- 2 2 6 2 2 6 2 2 6 38 38 38
39726- 2 2 6 2 2 6 2 2 6 2 2 6
39727- 6 6 6 86 86 86 46 46 46 14 14 14
39728- 0 0 0 0 0 0 0 0 0 0 0 0
39729- 0 0 0 0 0 0 0 0 0 0 0 0
39730- 0 0 0 0 0 0 0 0 0 0 0 0
39731- 0 0 0 0 0 0 0 0 0 0 0 0
39732- 0 0 0 0 0 0 0 0 0 0 0 0
39733- 0 0 0 0 0 0 0 0 0 0 0 0
39734- 0 0 0 0 0 0 0 0 0 6 6 6
39735- 18 18 18 46 46 46 86 86 86 18 18 18
39736- 2 2 6 34 34 34 10 10 10 6 6 6
39737-210 210 210 253 253 253 253 253 253 253 253 253
39738-253 253 253 253 253 253 253 253 253 253 253 253
39739-253 253 253 253 253 253 234 234 234 242 242 242
39740-253 253 253 253 253 253 253 253 253 253 253 253
39741-253 253 253 253 253 253 253 253 253 253 253 253
39742-253 253 253 253 253 253 253 253 253 253 253 253
39743-253 253 253 253 253 253 253 253 253 253 253 253
39744-253 253 253 253 253 253 221 221 221 6 6 6
39745- 2 2 6 2 2 6 6 6 6 30 30 30
39746- 2 2 6 2 2 6 2 2 6 2 2 6
39747- 2 2 6 82 82 82 54 54 54 18 18 18
39748- 6 6 6 0 0 0 0 0 0 0 0 0
39749- 0 0 0 0 0 0 0 0 0 0 0 0
39750- 0 0 0 0 0 0 0 0 0 0 0 0
39751- 0 0 0 0 0 0 0 0 0 0 0 0
39752- 0 0 0 0 0 0 0 0 0 0 0 0
39753- 0 0 0 0 0 0 0 0 0 0 0 0
39754- 0 0 0 0 0 0 0 0 0 10 10 10
39755- 26 26 26 66 66 66 62 62 62 2 2 6
39756- 2 2 6 38 38 38 10 10 10 26 26 26
39757-238 238 238 253 253 253 253 253 253 253 253 253
39758-253 253 253 253 253 253 253 253 253 253 253 253
39759-253 253 253 253 253 253 231 231 231 238 238 238
39760-253 253 253 253 253 253 253 253 253 253 253 253
39761-253 253 253 253 253 253 253 253 253 253 253 253
39762-253 253 253 253 253 253 253 253 253 253 253 253
39763-253 253 253 253 253 253 253 253 253 253 253 253
39764-253 253 253 253 253 253 231 231 231 6 6 6
39765- 2 2 6 2 2 6 10 10 10 30 30 30
39766- 2 2 6 2 2 6 2 2 6 2 2 6
39767- 2 2 6 66 66 66 58 58 58 22 22 22
39768- 6 6 6 0 0 0 0 0 0 0 0 0
39769- 0 0 0 0 0 0 0 0 0 0 0 0
39770- 0 0 0 0 0 0 0 0 0 0 0 0
39771- 0 0 0 0 0 0 0 0 0 0 0 0
39772- 0 0 0 0 0 0 0 0 0 0 0 0
39773- 0 0 0 0 0 0 0 0 0 0 0 0
39774- 0 0 0 0 0 0 0 0 0 10 10 10
39775- 38 38 38 78 78 78 6 6 6 2 2 6
39776- 2 2 6 46 46 46 14 14 14 42 42 42
39777-246 246 246 253 253 253 253 253 253 253 253 253
39778-253 253 253 253 253 253 253 253 253 253 253 253
39779-253 253 253 253 253 253 231 231 231 242 242 242
39780-253 253 253 253 253 253 253 253 253 253 253 253
39781-253 253 253 253 253 253 253 253 253 253 253 253
39782-253 253 253 253 253 253 253 253 253 253 253 253
39783-253 253 253 253 253 253 253 253 253 253 253 253
39784-253 253 253 253 253 253 234 234 234 10 10 10
39785- 2 2 6 2 2 6 22 22 22 14 14 14
39786- 2 2 6 2 2 6 2 2 6 2 2 6
39787- 2 2 6 66 66 66 62 62 62 22 22 22
39788- 6 6 6 0 0 0 0 0 0 0 0 0
39789- 0 0 0 0 0 0 0 0 0 0 0 0
39790- 0 0 0 0 0 0 0 0 0 0 0 0
39791- 0 0 0 0 0 0 0 0 0 0 0 0
39792- 0 0 0 0 0 0 0 0 0 0 0 0
39793- 0 0 0 0 0 0 0 0 0 0 0 0
39794- 0 0 0 0 0 0 6 6 6 18 18 18
39795- 50 50 50 74 74 74 2 2 6 2 2 6
39796- 14 14 14 70 70 70 34 34 34 62 62 62
39797-250 250 250 253 253 253 253 253 253 253 253 253
39798-253 253 253 253 253 253 253 253 253 253 253 253
39799-253 253 253 253 253 253 231 231 231 246 246 246
39800-253 253 253 253 253 253 253 253 253 253 253 253
39801-253 253 253 253 253 253 253 253 253 253 253 253
39802-253 253 253 253 253 253 253 253 253 253 253 253
39803-253 253 253 253 253 253 253 253 253 253 253 253
39804-253 253 253 253 253 253 234 234 234 14 14 14
39805- 2 2 6 2 2 6 30 30 30 2 2 6
39806- 2 2 6 2 2 6 2 2 6 2 2 6
39807- 2 2 6 66 66 66 62 62 62 22 22 22
39808- 6 6 6 0 0 0 0 0 0 0 0 0
39809- 0 0 0 0 0 0 0 0 0 0 0 0
39810- 0 0 0 0 0 0 0 0 0 0 0 0
39811- 0 0 0 0 0 0 0 0 0 0 0 0
39812- 0 0 0 0 0 0 0 0 0 0 0 0
39813- 0 0 0 0 0 0 0 0 0 0 0 0
39814- 0 0 0 0 0 0 6 6 6 18 18 18
39815- 54 54 54 62 62 62 2 2 6 2 2 6
39816- 2 2 6 30 30 30 46 46 46 70 70 70
39817-250 250 250 253 253 253 253 253 253 253 253 253
39818-253 253 253 253 253 253 253 253 253 253 253 253
39819-253 253 253 253 253 253 231 231 231 246 246 246
39820-253 253 253 253 253 253 253 253 253 253 253 253
39821-253 253 253 253 253 253 253 253 253 253 253 253
39822-253 253 253 253 253 253 253 253 253 253 253 253
39823-253 253 253 253 253 253 253 253 253 253 253 253
39824-253 253 253 253 253 253 226 226 226 10 10 10
39825- 2 2 6 6 6 6 30 30 30 2 2 6
39826- 2 2 6 2 2 6 2 2 6 2 2 6
39827- 2 2 6 66 66 66 58 58 58 22 22 22
39828- 6 6 6 0 0 0 0 0 0 0 0 0
39829- 0 0 0 0 0 0 0 0 0 0 0 0
39830- 0 0 0 0 0 0 0 0 0 0 0 0
39831- 0 0 0 0 0 0 0 0 0 0 0 0
39832- 0 0 0 0 0 0 0 0 0 0 0 0
39833- 0 0 0 0 0 0 0 0 0 0 0 0
39834- 0 0 0 0 0 0 6 6 6 22 22 22
39835- 58 58 58 62 62 62 2 2 6 2 2 6
39836- 2 2 6 2 2 6 30 30 30 78 78 78
39837-250 250 250 253 253 253 253 253 253 253 253 253
39838-253 253 253 253 253 253 253 253 253 253 253 253
39839-253 253 253 253 253 253 231 231 231 246 246 246
39840-253 253 253 253 253 253 253 253 253 253 253 253
39841-253 253 253 253 253 253 253 253 253 253 253 253
39842-253 253 253 253 253 253 253 253 253 253 253 253
39843-253 253 253 253 253 253 253 253 253 253 253 253
39844-253 253 253 253 253 253 206 206 206 2 2 6
39845- 22 22 22 34 34 34 18 14 6 22 22 22
39846- 26 26 26 18 18 18 6 6 6 2 2 6
39847- 2 2 6 82 82 82 54 54 54 18 18 18
39848- 6 6 6 0 0 0 0 0 0 0 0 0
39849- 0 0 0 0 0 0 0 0 0 0 0 0
39850- 0 0 0 0 0 0 0 0 0 0 0 0
39851- 0 0 0 0 0 0 0 0 0 0 0 0
39852- 0 0 0 0 0 0 0 0 0 0 0 0
39853- 0 0 0 0 0 0 0 0 0 0 0 0
39854- 0 0 0 0 0 0 6 6 6 26 26 26
39855- 62 62 62 106 106 106 74 54 14 185 133 11
39856-210 162 10 121 92 8 6 6 6 62 62 62
39857-238 238 238 253 253 253 253 253 253 253 253 253
39858-253 253 253 253 253 253 253 253 253 253 253 253
39859-253 253 253 253 253 253 231 231 231 246 246 246
39860-253 253 253 253 253 253 253 253 253 253 253 253
39861-253 253 253 253 253 253 253 253 253 253 253 253
39862-253 253 253 253 253 253 253 253 253 253 253 253
39863-253 253 253 253 253 253 253 253 253 253 253 253
39864-253 253 253 253 253 253 158 158 158 18 18 18
39865- 14 14 14 2 2 6 2 2 6 2 2 6
39866- 6 6 6 18 18 18 66 66 66 38 38 38
39867- 6 6 6 94 94 94 50 50 50 18 18 18
39868- 6 6 6 0 0 0 0 0 0 0 0 0
39869- 0 0 0 0 0 0 0 0 0 0 0 0
39870- 0 0 0 0 0 0 0 0 0 0 0 0
39871- 0 0 0 0 0 0 0 0 0 0 0 0
39872- 0 0 0 0 0 0 0 0 0 0 0 0
39873- 0 0 0 0 0 0 0 0 0 6 6 6
39874- 10 10 10 10 10 10 18 18 18 38 38 38
39875- 78 78 78 142 134 106 216 158 10 242 186 14
39876-246 190 14 246 190 14 156 118 10 10 10 10
39877- 90 90 90 238 238 238 253 253 253 253 253 253
39878-253 253 253 253 253 253 253 253 253 253 253 253
39879-253 253 253 253 253 253 231 231 231 250 250 250
39880-253 253 253 253 253 253 253 253 253 253 253 253
39881-253 253 253 253 253 253 253 253 253 253 253 253
39882-253 253 253 253 253 253 253 253 253 253 253 253
39883-253 253 253 253 253 253 253 253 253 246 230 190
39884-238 204 91 238 204 91 181 142 44 37 26 9
39885- 2 2 6 2 2 6 2 2 6 2 2 6
39886- 2 2 6 2 2 6 38 38 38 46 46 46
39887- 26 26 26 106 106 106 54 54 54 18 18 18
39888- 6 6 6 0 0 0 0 0 0 0 0 0
39889- 0 0 0 0 0 0 0 0 0 0 0 0
39890- 0 0 0 0 0 0 0 0 0 0 0 0
39891- 0 0 0 0 0 0 0 0 0 0 0 0
39892- 0 0 0 0 0 0 0 0 0 0 0 0
39893- 0 0 0 6 6 6 14 14 14 22 22 22
39894- 30 30 30 38 38 38 50 50 50 70 70 70
39895-106 106 106 190 142 34 226 170 11 242 186 14
39896-246 190 14 246 190 14 246 190 14 154 114 10
39897- 6 6 6 74 74 74 226 226 226 253 253 253
39898-253 253 253 253 253 253 253 253 253 253 253 253
39899-253 253 253 253 253 253 231 231 231 250 250 250
39900-253 253 253 253 253 253 253 253 253 253 253 253
39901-253 253 253 253 253 253 253 253 253 253 253 253
39902-253 253 253 253 253 253 253 253 253 253 253 253
39903-253 253 253 253 253 253 253 253 253 228 184 62
39904-241 196 14 241 208 19 232 195 16 38 30 10
39905- 2 2 6 2 2 6 2 2 6 2 2 6
39906- 2 2 6 6 6 6 30 30 30 26 26 26
39907-203 166 17 154 142 90 66 66 66 26 26 26
39908- 6 6 6 0 0 0 0 0 0 0 0 0
39909- 0 0 0 0 0 0 0 0 0 0 0 0
39910- 0 0 0 0 0 0 0 0 0 0 0 0
39911- 0 0 0 0 0 0 0 0 0 0 0 0
39912- 0 0 0 0 0 0 0 0 0 0 0 0
39913- 6 6 6 18 18 18 38 38 38 58 58 58
39914- 78 78 78 86 86 86 101 101 101 123 123 123
39915-175 146 61 210 150 10 234 174 13 246 186 14
39916-246 190 14 246 190 14 246 190 14 238 190 10
39917-102 78 10 2 2 6 46 46 46 198 198 198
39918-253 253 253 253 253 253 253 253 253 253 253 253
39919-253 253 253 253 253 253 234 234 234 242 242 242
39920-253 253 253 253 253 253 253 253 253 253 253 253
39921-253 253 253 253 253 253 253 253 253 253 253 253
39922-253 253 253 253 253 253 253 253 253 253 253 253
39923-253 253 253 253 253 253 253 253 253 224 178 62
39924-242 186 14 241 196 14 210 166 10 22 18 6
39925- 2 2 6 2 2 6 2 2 6 2 2 6
39926- 2 2 6 2 2 6 6 6 6 121 92 8
39927-238 202 15 232 195 16 82 82 82 34 34 34
39928- 10 10 10 0 0 0 0 0 0 0 0 0
39929- 0 0 0 0 0 0 0 0 0 0 0 0
39930- 0 0 0 0 0 0 0 0 0 0 0 0
39931- 0 0 0 0 0 0 0 0 0 0 0 0
39932- 0 0 0 0 0 0 0 0 0 0 0 0
39933- 14 14 14 38 38 38 70 70 70 154 122 46
39934-190 142 34 200 144 11 197 138 11 197 138 11
39935-213 154 11 226 170 11 242 186 14 246 190 14
39936-246 190 14 246 190 14 246 190 14 246 190 14
39937-225 175 15 46 32 6 2 2 6 22 22 22
39938-158 158 158 250 250 250 253 253 253 253 253 253
39939-253 253 253 253 253 253 253 253 253 253 253 253
39940-253 253 253 253 253 253 253 253 253 253 253 253
39941-253 253 253 253 253 253 253 253 253 253 253 253
39942-253 253 253 253 253 253 253 253 253 253 253 253
39943-253 253 253 250 250 250 242 242 242 224 178 62
39944-239 182 13 236 186 11 213 154 11 46 32 6
39945- 2 2 6 2 2 6 2 2 6 2 2 6
39946- 2 2 6 2 2 6 61 42 6 225 175 15
39947-238 190 10 236 186 11 112 100 78 42 42 42
39948- 14 14 14 0 0 0 0 0 0 0 0 0
39949- 0 0 0 0 0 0 0 0 0 0 0 0
39950- 0 0 0 0 0 0 0 0 0 0 0 0
39951- 0 0 0 0 0 0 0 0 0 0 0 0
39952- 0 0 0 0 0 0 0 0 0 6 6 6
39953- 22 22 22 54 54 54 154 122 46 213 154 11
39954-226 170 11 230 174 11 226 170 11 226 170 11
39955-236 178 12 242 186 14 246 190 14 246 190 14
39956-246 190 14 246 190 14 246 190 14 246 190 14
39957-241 196 14 184 144 12 10 10 10 2 2 6
39958- 6 6 6 116 116 116 242 242 242 253 253 253
39959-253 253 253 253 253 253 253 253 253 253 253 253
39960-253 253 253 253 253 253 253 253 253 253 253 253
39961-253 253 253 253 253 253 253 253 253 253 253 253
39962-253 253 253 253 253 253 253 253 253 253 253 253
39963-253 253 253 231 231 231 198 198 198 214 170 54
39964-236 178 12 236 178 12 210 150 10 137 92 6
39965- 18 14 6 2 2 6 2 2 6 2 2 6
39966- 6 6 6 70 47 6 200 144 11 236 178 12
39967-239 182 13 239 182 13 124 112 88 58 58 58
39968- 22 22 22 6 6 6 0 0 0 0 0 0
39969- 0 0 0 0 0 0 0 0 0 0 0 0
39970- 0 0 0 0 0 0 0 0 0 0 0 0
39971- 0 0 0 0 0 0 0 0 0 0 0 0
39972- 0 0 0 0 0 0 0 0 0 10 10 10
39973- 30 30 30 70 70 70 180 133 36 226 170 11
39974-239 182 13 242 186 14 242 186 14 246 186 14
39975-246 190 14 246 190 14 246 190 14 246 190 14
39976-246 190 14 246 190 14 246 190 14 246 190 14
39977-246 190 14 232 195 16 98 70 6 2 2 6
39978- 2 2 6 2 2 6 66 66 66 221 221 221
39979-253 253 253 253 253 253 253 253 253 253 253 253
39980-253 253 253 253 253 253 253 253 253 253 253 253
39981-253 253 253 253 253 253 253 253 253 253 253 253
39982-253 253 253 253 253 253 253 253 253 253 253 253
39983-253 253 253 206 206 206 198 198 198 214 166 58
39984-230 174 11 230 174 11 216 158 10 192 133 9
39985-163 110 8 116 81 8 102 78 10 116 81 8
39986-167 114 7 197 138 11 226 170 11 239 182 13
39987-242 186 14 242 186 14 162 146 94 78 78 78
39988- 34 34 34 14 14 14 6 6 6 0 0 0
39989- 0 0 0 0 0 0 0 0 0 0 0 0
39990- 0 0 0 0 0 0 0 0 0 0 0 0
39991- 0 0 0 0 0 0 0 0 0 0 0 0
39992- 0 0 0 0 0 0 0 0 0 6 6 6
39993- 30 30 30 78 78 78 190 142 34 226 170 11
39994-239 182 13 246 190 14 246 190 14 246 190 14
39995-246 190 14 246 190 14 246 190 14 246 190 14
39996-246 190 14 246 190 14 246 190 14 246 190 14
39997-246 190 14 241 196 14 203 166 17 22 18 6
39998- 2 2 6 2 2 6 2 2 6 38 38 38
39999-218 218 218 253 253 253 253 253 253 253 253 253
40000-253 253 253 253 253 253 253 253 253 253 253 253
40001-253 253 253 253 253 253 253 253 253 253 253 253
40002-253 253 253 253 253 253 253 253 253 253 253 253
40003-250 250 250 206 206 206 198 198 198 202 162 69
40004-226 170 11 236 178 12 224 166 10 210 150 10
40005-200 144 11 197 138 11 192 133 9 197 138 11
40006-210 150 10 226 170 11 242 186 14 246 190 14
40007-246 190 14 246 186 14 225 175 15 124 112 88
40008- 62 62 62 30 30 30 14 14 14 6 6 6
40009- 0 0 0 0 0 0 0 0 0 0 0 0
40010- 0 0 0 0 0 0 0 0 0 0 0 0
40011- 0 0 0 0 0 0 0 0 0 0 0 0
40012- 0 0 0 0 0 0 0 0 0 10 10 10
40013- 30 30 30 78 78 78 174 135 50 224 166 10
40014-239 182 13 246 190 14 246 190 14 246 190 14
40015-246 190 14 246 190 14 246 190 14 246 190 14
40016-246 190 14 246 190 14 246 190 14 246 190 14
40017-246 190 14 246 190 14 241 196 14 139 102 15
40018- 2 2 6 2 2 6 2 2 6 2 2 6
40019- 78 78 78 250 250 250 253 253 253 253 253 253
40020-253 253 253 253 253 253 253 253 253 253 253 253
40021-253 253 253 253 253 253 253 253 253 253 253 253
40022-253 253 253 253 253 253 253 253 253 253 253 253
40023-250 250 250 214 214 214 198 198 198 190 150 46
40024-219 162 10 236 178 12 234 174 13 224 166 10
40025-216 158 10 213 154 11 213 154 11 216 158 10
40026-226 170 11 239 182 13 246 190 14 246 190 14
40027-246 190 14 246 190 14 242 186 14 206 162 42
40028-101 101 101 58 58 58 30 30 30 14 14 14
40029- 6 6 6 0 0 0 0 0 0 0 0 0
40030- 0 0 0 0 0 0 0 0 0 0 0 0
40031- 0 0 0 0 0 0 0 0 0 0 0 0
40032- 0 0 0 0 0 0 0 0 0 10 10 10
40033- 30 30 30 74 74 74 174 135 50 216 158 10
40034-236 178 12 246 190 14 246 190 14 246 190 14
40035-246 190 14 246 190 14 246 190 14 246 190 14
40036-246 190 14 246 190 14 246 190 14 246 190 14
40037-246 190 14 246 190 14 241 196 14 226 184 13
40038- 61 42 6 2 2 6 2 2 6 2 2 6
40039- 22 22 22 238 238 238 253 253 253 253 253 253
40040-253 253 253 253 253 253 253 253 253 253 253 253
40041-253 253 253 253 253 253 253 253 253 253 253 253
40042-253 253 253 253 253 253 253 253 253 253 253 253
40043-253 253 253 226 226 226 187 187 187 180 133 36
40044-216 158 10 236 178 12 239 182 13 236 178 12
40045-230 174 11 226 170 11 226 170 11 230 174 11
40046-236 178 12 242 186 14 246 190 14 246 190 14
40047-246 190 14 246 190 14 246 186 14 239 182 13
40048-206 162 42 106 106 106 66 66 66 34 34 34
40049- 14 14 14 6 6 6 0 0 0 0 0 0
40050- 0 0 0 0 0 0 0 0 0 0 0 0
40051- 0 0 0 0 0 0 0 0 0 0 0 0
40052- 0 0 0 0 0 0 0 0 0 6 6 6
40053- 26 26 26 70 70 70 163 133 67 213 154 11
40054-236 178 12 246 190 14 246 190 14 246 190 14
40055-246 190 14 246 190 14 246 190 14 246 190 14
40056-246 190 14 246 190 14 246 190 14 246 190 14
40057-246 190 14 246 190 14 246 190 14 241 196 14
40058-190 146 13 18 14 6 2 2 6 2 2 6
40059- 46 46 46 246 246 246 253 253 253 253 253 253
40060-253 253 253 253 253 253 253 253 253 253 253 253
40061-253 253 253 253 253 253 253 253 253 253 253 253
40062-253 253 253 253 253 253 253 253 253 253 253 253
40063-253 253 253 221 221 221 86 86 86 156 107 11
40064-216 158 10 236 178 12 242 186 14 246 186 14
40065-242 186 14 239 182 13 239 182 13 242 186 14
40066-242 186 14 246 186 14 246 190 14 246 190 14
40067-246 190 14 246 190 14 246 190 14 246 190 14
40068-242 186 14 225 175 15 142 122 72 66 66 66
40069- 30 30 30 10 10 10 0 0 0 0 0 0
40070- 0 0 0 0 0 0 0 0 0 0 0 0
40071- 0 0 0 0 0 0 0 0 0 0 0 0
40072- 0 0 0 0 0 0 0 0 0 6 6 6
40073- 26 26 26 70 70 70 163 133 67 210 150 10
40074-236 178 12 246 190 14 246 190 14 246 190 14
40075-246 190 14 246 190 14 246 190 14 246 190 14
40076-246 190 14 246 190 14 246 190 14 246 190 14
40077-246 190 14 246 190 14 246 190 14 246 190 14
40078-232 195 16 121 92 8 34 34 34 106 106 106
40079-221 221 221 253 253 253 253 253 253 253 253 253
40080-253 253 253 253 253 253 253 253 253 253 253 253
40081-253 253 253 253 253 253 253 253 253 253 253 253
40082-253 253 253 253 253 253 253 253 253 253 253 253
40083-242 242 242 82 82 82 18 14 6 163 110 8
40084-216 158 10 236 178 12 242 186 14 246 190 14
40085-246 190 14 246 190 14 246 190 14 246 190 14
40086-246 190 14 246 190 14 246 190 14 246 190 14
40087-246 190 14 246 190 14 246 190 14 246 190 14
40088-246 190 14 246 190 14 242 186 14 163 133 67
40089- 46 46 46 18 18 18 6 6 6 0 0 0
40090- 0 0 0 0 0 0 0 0 0 0 0 0
40091- 0 0 0 0 0 0 0 0 0 0 0 0
40092- 0 0 0 0 0 0 0 0 0 10 10 10
40093- 30 30 30 78 78 78 163 133 67 210 150 10
40094-236 178 12 246 186 14 246 190 14 246 190 14
40095-246 190 14 246 190 14 246 190 14 246 190 14
40096-246 190 14 246 190 14 246 190 14 246 190 14
40097-246 190 14 246 190 14 246 190 14 246 190 14
40098-241 196 14 215 174 15 190 178 144 253 253 253
40099-253 253 253 253 253 253 253 253 253 253 253 253
40100-253 253 253 253 253 253 253 253 253 253 253 253
40101-253 253 253 253 253 253 253 253 253 253 253 253
40102-253 253 253 253 253 253 253 253 253 218 218 218
40103- 58 58 58 2 2 6 22 18 6 167 114 7
40104-216 158 10 236 178 12 246 186 14 246 190 14
40105-246 190 14 246 190 14 246 190 14 246 190 14
40106-246 190 14 246 190 14 246 190 14 246 190 14
40107-246 190 14 246 190 14 246 190 14 246 190 14
40108-246 190 14 246 186 14 242 186 14 190 150 46
40109- 54 54 54 22 22 22 6 6 6 0 0 0
40110- 0 0 0 0 0 0 0 0 0 0 0 0
40111- 0 0 0 0 0 0 0 0 0 0 0 0
40112- 0 0 0 0 0 0 0 0 0 14 14 14
40113- 38 38 38 86 86 86 180 133 36 213 154 11
40114-236 178 12 246 186 14 246 190 14 246 190 14
40115-246 190 14 246 190 14 246 190 14 246 190 14
40116-246 190 14 246 190 14 246 190 14 246 190 14
40117-246 190 14 246 190 14 246 190 14 246 190 14
40118-246 190 14 232 195 16 190 146 13 214 214 214
40119-253 253 253 253 253 253 253 253 253 253 253 253
40120-253 253 253 253 253 253 253 253 253 253 253 253
40121-253 253 253 253 253 253 253 253 253 253 253 253
40122-253 253 253 250 250 250 170 170 170 26 26 26
40123- 2 2 6 2 2 6 37 26 9 163 110 8
40124-219 162 10 239 182 13 246 186 14 246 190 14
40125-246 190 14 246 190 14 246 190 14 246 190 14
40126-246 190 14 246 190 14 246 190 14 246 190 14
40127-246 190 14 246 190 14 246 190 14 246 190 14
40128-246 186 14 236 178 12 224 166 10 142 122 72
40129- 46 46 46 18 18 18 6 6 6 0 0 0
40130- 0 0 0 0 0 0 0 0 0 0 0 0
40131- 0 0 0 0 0 0 0 0 0 0 0 0
40132- 0 0 0 0 0 0 6 6 6 18 18 18
40133- 50 50 50 109 106 95 192 133 9 224 166 10
40134-242 186 14 246 190 14 246 190 14 246 190 14
40135-246 190 14 246 190 14 246 190 14 246 190 14
40136-246 190 14 246 190 14 246 190 14 246 190 14
40137-246 190 14 246 190 14 246 190 14 246 190 14
40138-242 186 14 226 184 13 210 162 10 142 110 46
40139-226 226 226 253 253 253 253 253 253 253 253 253
40140-253 253 253 253 253 253 253 253 253 253 253 253
40141-253 253 253 253 253 253 253 253 253 253 253 253
40142-198 198 198 66 66 66 2 2 6 2 2 6
40143- 2 2 6 2 2 6 50 34 6 156 107 11
40144-219 162 10 239 182 13 246 186 14 246 190 14
40145-246 190 14 246 190 14 246 190 14 246 190 14
40146-246 190 14 246 190 14 246 190 14 246 190 14
40147-246 190 14 246 190 14 246 190 14 242 186 14
40148-234 174 13 213 154 11 154 122 46 66 66 66
40149- 30 30 30 10 10 10 0 0 0 0 0 0
40150- 0 0 0 0 0 0 0 0 0 0 0 0
40151- 0 0 0 0 0 0 0 0 0 0 0 0
40152- 0 0 0 0 0 0 6 6 6 22 22 22
40153- 58 58 58 154 121 60 206 145 10 234 174 13
40154-242 186 14 246 186 14 246 190 14 246 190 14
40155-246 190 14 246 190 14 246 190 14 246 190 14
40156-246 190 14 246 190 14 246 190 14 246 190 14
40157-246 190 14 246 190 14 246 190 14 246 190 14
40158-246 186 14 236 178 12 210 162 10 163 110 8
40159- 61 42 6 138 138 138 218 218 218 250 250 250
40160-253 253 253 253 253 253 253 253 253 250 250 250
40161-242 242 242 210 210 210 144 144 144 66 66 66
40162- 6 6 6 2 2 6 2 2 6 2 2 6
40163- 2 2 6 2 2 6 61 42 6 163 110 8
40164-216 158 10 236 178 12 246 190 14 246 190 14
40165-246 190 14 246 190 14 246 190 14 246 190 14
40166-246 190 14 246 190 14 246 190 14 246 190 14
40167-246 190 14 239 182 13 230 174 11 216 158 10
40168-190 142 34 124 112 88 70 70 70 38 38 38
40169- 18 18 18 6 6 6 0 0 0 0 0 0
40170- 0 0 0 0 0 0 0 0 0 0 0 0
40171- 0 0 0 0 0 0 0 0 0 0 0 0
40172- 0 0 0 0 0 0 6 6 6 22 22 22
40173- 62 62 62 168 124 44 206 145 10 224 166 10
40174-236 178 12 239 182 13 242 186 14 242 186 14
40175-246 186 14 246 190 14 246 190 14 246 190 14
40176-246 190 14 246 190 14 246 190 14 246 190 14
40177-246 190 14 246 190 14 246 190 14 246 190 14
40178-246 190 14 236 178 12 216 158 10 175 118 6
40179- 80 54 7 2 2 6 6 6 6 30 30 30
40180- 54 54 54 62 62 62 50 50 50 38 38 38
40181- 14 14 14 2 2 6 2 2 6 2 2 6
40182- 2 2 6 2 2 6 2 2 6 2 2 6
40183- 2 2 6 6 6 6 80 54 7 167 114 7
40184-213 154 11 236 178 12 246 190 14 246 190 14
40185-246 190 14 246 190 14 246 190 14 246 190 14
40186-246 190 14 242 186 14 239 182 13 239 182 13
40187-230 174 11 210 150 10 174 135 50 124 112 88
40188- 82 82 82 54 54 54 34 34 34 18 18 18
40189- 6 6 6 0 0 0 0 0 0 0 0 0
40190- 0 0 0 0 0 0 0 0 0 0 0 0
40191- 0 0 0 0 0 0 0 0 0 0 0 0
40192- 0 0 0 0 0 0 6 6 6 18 18 18
40193- 50 50 50 158 118 36 192 133 9 200 144 11
40194-216 158 10 219 162 10 224 166 10 226 170 11
40195-230 174 11 236 178 12 239 182 13 239 182 13
40196-242 186 14 246 186 14 246 190 14 246 190 14
40197-246 190 14 246 190 14 246 190 14 246 190 14
40198-246 186 14 230 174 11 210 150 10 163 110 8
40199-104 69 6 10 10 10 2 2 6 2 2 6
40200- 2 2 6 2 2 6 2 2 6 2 2 6
40201- 2 2 6 2 2 6 2 2 6 2 2 6
40202- 2 2 6 2 2 6 2 2 6 2 2 6
40203- 2 2 6 6 6 6 91 60 6 167 114 7
40204-206 145 10 230 174 11 242 186 14 246 190 14
40205-246 190 14 246 190 14 246 186 14 242 186 14
40206-239 182 13 230 174 11 224 166 10 213 154 11
40207-180 133 36 124 112 88 86 86 86 58 58 58
40208- 38 38 38 22 22 22 10 10 10 6 6 6
40209- 0 0 0 0 0 0 0 0 0 0 0 0
40210- 0 0 0 0 0 0 0 0 0 0 0 0
40211- 0 0 0 0 0 0 0 0 0 0 0 0
40212- 0 0 0 0 0 0 0 0 0 14 14 14
40213- 34 34 34 70 70 70 138 110 50 158 118 36
40214-167 114 7 180 123 7 192 133 9 197 138 11
40215-200 144 11 206 145 10 213 154 11 219 162 10
40216-224 166 10 230 174 11 239 182 13 242 186 14
40217-246 186 14 246 186 14 246 186 14 246 186 14
40218-239 182 13 216 158 10 185 133 11 152 99 6
40219-104 69 6 18 14 6 2 2 6 2 2 6
40220- 2 2 6 2 2 6 2 2 6 2 2 6
40221- 2 2 6 2 2 6 2 2 6 2 2 6
40222- 2 2 6 2 2 6 2 2 6 2 2 6
40223- 2 2 6 6 6 6 80 54 7 152 99 6
40224-192 133 9 219 162 10 236 178 12 239 182 13
40225-246 186 14 242 186 14 239 182 13 236 178 12
40226-224 166 10 206 145 10 192 133 9 154 121 60
40227- 94 94 94 62 62 62 42 42 42 22 22 22
40228- 14 14 14 6 6 6 0 0 0 0 0 0
40229- 0 0 0 0 0 0 0 0 0 0 0 0
40230- 0 0 0 0 0 0 0 0 0 0 0 0
40231- 0 0 0 0 0 0 0 0 0 0 0 0
40232- 0 0 0 0 0 0 0 0 0 6 6 6
40233- 18 18 18 34 34 34 58 58 58 78 78 78
40234-101 98 89 124 112 88 142 110 46 156 107 11
40235-163 110 8 167 114 7 175 118 6 180 123 7
40236-185 133 11 197 138 11 210 150 10 219 162 10
40237-226 170 11 236 178 12 236 178 12 234 174 13
40238-219 162 10 197 138 11 163 110 8 130 83 6
40239- 91 60 6 10 10 10 2 2 6 2 2 6
40240- 18 18 18 38 38 38 38 38 38 38 38 38
40241- 38 38 38 38 38 38 38 38 38 38 38 38
40242- 38 38 38 38 38 38 26 26 26 2 2 6
40243- 2 2 6 6 6 6 70 47 6 137 92 6
40244-175 118 6 200 144 11 219 162 10 230 174 11
40245-234 174 13 230 174 11 219 162 10 210 150 10
40246-192 133 9 163 110 8 124 112 88 82 82 82
40247- 50 50 50 30 30 30 14 14 14 6 6 6
40248- 0 0 0 0 0 0 0 0 0 0 0 0
40249- 0 0 0 0 0 0 0 0 0 0 0 0
40250- 0 0 0 0 0 0 0 0 0 0 0 0
40251- 0 0 0 0 0 0 0 0 0 0 0 0
40252- 0 0 0 0 0 0 0 0 0 0 0 0
40253- 6 6 6 14 14 14 22 22 22 34 34 34
40254- 42 42 42 58 58 58 74 74 74 86 86 86
40255-101 98 89 122 102 70 130 98 46 121 87 25
40256-137 92 6 152 99 6 163 110 8 180 123 7
40257-185 133 11 197 138 11 206 145 10 200 144 11
40258-180 123 7 156 107 11 130 83 6 104 69 6
40259- 50 34 6 54 54 54 110 110 110 101 98 89
40260- 86 86 86 82 82 82 78 78 78 78 78 78
40261- 78 78 78 78 78 78 78 78 78 78 78 78
40262- 78 78 78 82 82 82 86 86 86 94 94 94
40263-106 106 106 101 101 101 86 66 34 124 80 6
40264-156 107 11 180 123 7 192 133 9 200 144 11
40265-206 145 10 200 144 11 192 133 9 175 118 6
40266-139 102 15 109 106 95 70 70 70 42 42 42
40267- 22 22 22 10 10 10 0 0 0 0 0 0
40268- 0 0 0 0 0 0 0 0 0 0 0 0
40269- 0 0 0 0 0 0 0 0 0 0 0 0
40270- 0 0 0 0 0 0 0 0 0 0 0 0
40271- 0 0 0 0 0 0 0 0 0 0 0 0
40272- 0 0 0 0 0 0 0 0 0 0 0 0
40273- 0 0 0 0 0 0 6 6 6 10 10 10
40274- 14 14 14 22 22 22 30 30 30 38 38 38
40275- 50 50 50 62 62 62 74 74 74 90 90 90
40276-101 98 89 112 100 78 121 87 25 124 80 6
40277-137 92 6 152 99 6 152 99 6 152 99 6
40278-138 86 6 124 80 6 98 70 6 86 66 30
40279-101 98 89 82 82 82 58 58 58 46 46 46
40280- 38 38 38 34 34 34 34 34 34 34 34 34
40281- 34 34 34 34 34 34 34 34 34 34 34 34
40282- 34 34 34 34 34 34 38 38 38 42 42 42
40283- 54 54 54 82 82 82 94 86 76 91 60 6
40284-134 86 6 156 107 11 167 114 7 175 118 6
40285-175 118 6 167 114 7 152 99 6 121 87 25
40286-101 98 89 62 62 62 34 34 34 18 18 18
40287- 6 6 6 0 0 0 0 0 0 0 0 0
40288- 0 0 0 0 0 0 0 0 0 0 0 0
40289- 0 0 0 0 0 0 0 0 0 0 0 0
40290- 0 0 0 0 0 0 0 0 0 0 0 0
40291- 0 0 0 0 0 0 0 0 0 0 0 0
40292- 0 0 0 0 0 0 0 0 0 0 0 0
40293- 0 0 0 0 0 0 0 0 0 0 0 0
40294- 0 0 0 6 6 6 6 6 6 10 10 10
40295- 18 18 18 22 22 22 30 30 30 42 42 42
40296- 50 50 50 66 66 66 86 86 86 101 98 89
40297-106 86 58 98 70 6 104 69 6 104 69 6
40298-104 69 6 91 60 6 82 62 34 90 90 90
40299- 62 62 62 38 38 38 22 22 22 14 14 14
40300- 10 10 10 10 10 10 10 10 10 10 10 10
40301- 10 10 10 10 10 10 6 6 6 10 10 10
40302- 10 10 10 10 10 10 10 10 10 14 14 14
40303- 22 22 22 42 42 42 70 70 70 89 81 66
40304- 80 54 7 104 69 6 124 80 6 137 92 6
40305-134 86 6 116 81 8 100 82 52 86 86 86
40306- 58 58 58 30 30 30 14 14 14 6 6 6
40307- 0 0 0 0 0 0 0 0 0 0 0 0
40308- 0 0 0 0 0 0 0 0 0 0 0 0
40309- 0 0 0 0 0 0 0 0 0 0 0 0
40310- 0 0 0 0 0 0 0 0 0 0 0 0
40311- 0 0 0 0 0 0 0 0 0 0 0 0
40312- 0 0 0 0 0 0 0 0 0 0 0 0
40313- 0 0 0 0 0 0 0 0 0 0 0 0
40314- 0 0 0 0 0 0 0 0 0 0 0 0
40315- 0 0 0 6 6 6 10 10 10 14 14 14
40316- 18 18 18 26 26 26 38 38 38 54 54 54
40317- 70 70 70 86 86 86 94 86 76 89 81 66
40318- 89 81 66 86 86 86 74 74 74 50 50 50
40319- 30 30 30 14 14 14 6 6 6 0 0 0
40320- 0 0 0 0 0 0 0 0 0 0 0 0
40321- 0 0 0 0 0 0 0 0 0 0 0 0
40322- 0 0 0 0 0 0 0 0 0 0 0 0
40323- 6 6 6 18 18 18 34 34 34 58 58 58
40324- 82 82 82 89 81 66 89 81 66 89 81 66
40325- 94 86 66 94 86 76 74 74 74 50 50 50
40326- 26 26 26 14 14 14 6 6 6 0 0 0
40327- 0 0 0 0 0 0 0 0 0 0 0 0
40328- 0 0 0 0 0 0 0 0 0 0 0 0
40329- 0 0 0 0 0 0 0 0 0 0 0 0
40330- 0 0 0 0 0 0 0 0 0 0 0 0
40331- 0 0 0 0 0 0 0 0 0 0 0 0
40332- 0 0 0 0 0 0 0 0 0 0 0 0
40333- 0 0 0 0 0 0 0 0 0 0 0 0
40334- 0 0 0 0 0 0 0 0 0 0 0 0
40335- 0 0 0 0 0 0 0 0 0 0 0 0
40336- 6 6 6 6 6 6 14 14 14 18 18 18
40337- 30 30 30 38 38 38 46 46 46 54 54 54
40338- 50 50 50 42 42 42 30 30 30 18 18 18
40339- 10 10 10 0 0 0 0 0 0 0 0 0
40340- 0 0 0 0 0 0 0 0 0 0 0 0
40341- 0 0 0 0 0 0 0 0 0 0 0 0
40342- 0 0 0 0 0 0 0 0 0 0 0 0
40343- 0 0 0 6 6 6 14 14 14 26 26 26
40344- 38 38 38 50 50 50 58 58 58 58 58 58
40345- 54 54 54 42 42 42 30 30 30 18 18 18
40346- 10 10 10 0 0 0 0 0 0 0 0 0
40347- 0 0 0 0 0 0 0 0 0 0 0 0
40348- 0 0 0 0 0 0 0 0 0 0 0 0
40349- 0 0 0 0 0 0 0 0 0 0 0 0
40350- 0 0 0 0 0 0 0 0 0 0 0 0
40351- 0 0 0 0 0 0 0 0 0 0 0 0
40352- 0 0 0 0 0 0 0 0 0 0 0 0
40353- 0 0 0 0 0 0 0 0 0 0 0 0
40354- 0 0 0 0 0 0 0 0 0 0 0 0
40355- 0 0 0 0 0 0 0 0 0 0 0 0
40356- 0 0 0 0 0 0 0 0 0 6 6 6
40357- 6 6 6 10 10 10 14 14 14 18 18 18
40358- 18 18 18 14 14 14 10 10 10 6 6 6
40359- 0 0 0 0 0 0 0 0 0 0 0 0
40360- 0 0 0 0 0 0 0 0 0 0 0 0
40361- 0 0 0 0 0 0 0 0 0 0 0 0
40362- 0 0 0 0 0 0 0 0 0 0 0 0
40363- 0 0 0 0 0 0 0 0 0 6 6 6
40364- 14 14 14 18 18 18 22 22 22 22 22 22
40365- 18 18 18 14 14 14 10 10 10 6 6 6
40366- 0 0 0 0 0 0 0 0 0 0 0 0
40367- 0 0 0 0 0 0 0 0 0 0 0 0
40368- 0 0 0 0 0 0 0 0 0 0 0 0
40369- 0 0 0 0 0 0 0 0 0 0 0 0
40370- 0 0 0 0 0 0 0 0 0 0 0 0
40371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40375+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40381+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40382+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40383+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40384+4 4 4 4 4 4
40385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40389+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40396+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40397+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40398+4 4 4 4 4 4
40399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40410+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40411+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40412+4 4 4 4 4 4
40413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40425+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40426+4 4 4 4 4 4
40427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40440+4 4 4 4 4 4
40441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40454+4 4 4 4 4 4
40455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40459+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40460+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40464+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
40465+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40466+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
40467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40468+4 4 4 4 4 4
40469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40473+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40474+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40475+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40478+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40479+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40480+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40481+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40482+4 4 4 4 4 4
40483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40487+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40488+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40489+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40492+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40493+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40494+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40495+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40496+4 4 4 4 4 4
40497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40500+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40501+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40502+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40503+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40505+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40506+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40507+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40508+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40509+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40510+4 4 4 4 4 4
40511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40514+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40515+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40516+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40517+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40518+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40519+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40520+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40521+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40522+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40523+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40524+4 4 4 4 4 4
40525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40528+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40529+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40530+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40531+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40532+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40533+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40534+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40535+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40536+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40537+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40538+4 4 4 4 4 4
40539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40541+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40542+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40543+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40544+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40545+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40546+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40547+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40548+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40549+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40550+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40551+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40552+4 4 4 4 4 4
40553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40555+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40556+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40557+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40558+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40559+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40560+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40561+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40562+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40563+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40564+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40565+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40566+4 4 4 4 4 4
40567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40569+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40570+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40571+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40572+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40573+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40574+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40575+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40576+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40577+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40578+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40579+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40580+4 4 4 4 4 4
40581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40583+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40584+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40585+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40586+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40587+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40588+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40589+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40590+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40591+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40592+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40593+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40594+4 4 4 4 4 4
40595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40596+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40597+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40598+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40599+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40600+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40601+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40602+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40603+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40604+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40605+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40606+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40607+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40608+4 4 4 4 4 4
40609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40610+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40611+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40612+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40613+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40614+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40615+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40616+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40617+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40618+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40619+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40620+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40621+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40622+0 0 0 4 4 4
40623+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40624+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40625+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40626+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40627+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40628+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40629+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40630+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40631+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40632+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40633+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40634+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40635+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40636+2 0 0 0 0 0
40637+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40638+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40639+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40640+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40641+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40642+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40643+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40644+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40645+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40646+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40647+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40648+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40649+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40650+37 38 37 0 0 0
40651+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40652+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40653+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40654+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40655+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40656+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40657+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40658+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40659+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40660+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40661+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40662+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40663+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40664+85 115 134 4 0 0
40665+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40666+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40667+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40668+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40669+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40670+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40671+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40672+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40673+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40674+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40675+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40676+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40677+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40678+60 73 81 4 0 0
40679+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40680+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40681+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40682+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40683+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40684+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40685+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40686+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40687+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40688+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40689+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40690+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40691+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40692+16 19 21 4 0 0
40693+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40694+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40695+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40696+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40697+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40698+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40699+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40700+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40701+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40702+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40703+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40704+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40705+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40706+4 0 0 4 3 3
40707+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40708+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40709+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40711+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40712+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40713+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40714+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40715+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40716+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40717+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40718+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40719+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40720+3 2 2 4 4 4
40721+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40722+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40723+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40724+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40725+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40726+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40727+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40728+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40729+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40730+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40731+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40732+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40733+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40734+4 4 4 4 4 4
40735+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40736+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40737+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40738+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40739+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40740+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40741+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40742+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40743+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40744+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40745+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40746+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40747+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40748+4 4 4 4 4 4
40749+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40750+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40751+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40752+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40753+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40754+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40755+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40756+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40757+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40758+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40759+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40760+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40761+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40762+5 5 5 5 5 5
40763+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40764+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40765+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40766+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40767+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40768+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40769+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40770+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40771+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40772+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40773+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40774+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40775+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40776+5 5 5 4 4 4
40777+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40778+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40779+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40780+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40781+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40782+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40783+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40784+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40785+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40786+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40787+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40788+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40790+4 4 4 4 4 4
40791+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40792+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40793+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40794+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40795+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40796+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40797+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40798+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40799+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40800+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40801+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40802+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40804+4 4 4 4 4 4
40805+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40806+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40807+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40808+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40809+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40810+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40811+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40812+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40813+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40814+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40815+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40818+4 4 4 4 4 4
40819+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40820+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40821+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40822+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40823+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40824+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40825+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40826+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40827+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40828+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40829+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40832+4 4 4 4 4 4
40833+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40834+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40835+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40836+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40837+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40838+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40839+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40840+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40841+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40842+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40843+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40846+4 4 4 4 4 4
40847+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40848+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40849+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40850+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40851+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40852+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40853+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40854+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40855+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40856+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40857+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40860+4 4 4 4 4 4
40861+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40862+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40863+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40864+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40865+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40866+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40867+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40868+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40869+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40870+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40871+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40874+4 4 4 4 4 4
40875+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40876+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40877+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40878+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40879+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40880+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40881+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40882+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40883+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40884+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40885+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40888+4 4 4 4 4 4
40889+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40890+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40891+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40892+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40893+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40894+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40895+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40896+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40897+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40898+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40899+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40902+4 4 4 4 4 4
40903+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40904+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40905+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40906+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40907+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40908+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40909+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40910+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40911+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40912+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40913+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40916+4 4 4 4 4 4
40917+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40918+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40919+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40920+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40921+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40922+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40923+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40924+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40925+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40926+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40927+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40930+4 4 4 4 4 4
40931+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40932+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40933+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40934+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40935+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40936+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40937+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40938+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40939+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40940+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40941+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40944+4 4 4 4 4 4
40945+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40946+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40947+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40948+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40949+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40950+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40951+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40952+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40953+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40954+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40955+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40958+4 4 4 4 4 4
40959+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40960+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40961+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40962+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40963+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40964+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40965+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40966+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40967+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40968+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40969+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40972+4 4 4 4 4 4
40973+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40974+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40975+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40976+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40977+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40978+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40979+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40980+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40981+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40982+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40983+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40986+4 4 4 4 4 4
40987+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40988+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40989+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40990+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40991+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40992+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40993+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40994+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40995+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40996+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40997+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41000+4 4 4 4 4 4
41001+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
41002+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
41003+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41004+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
41005+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
41006+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
41007+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
41008+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
41009+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
41010+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41011+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41014+4 4 4 4 4 4
41015+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41016+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
41017+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
41018+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
41019+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
41020+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
41021+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
41022+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
41023+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41024+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41025+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41028+4 4 4 4 4 4
41029+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41030+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
41031+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41032+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
41033+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
41034+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
41035+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
41036+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
41037+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41038+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41039+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41042+4 4 4 4 4 4
41043+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41044+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
41045+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
41046+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
41047+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
41048+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
41049+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41050+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
41051+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41052+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41053+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41056+4 4 4 4 4 4
41057+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41058+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
41059+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
41060+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41061+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
41062+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
41063+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41064+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
41065+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41066+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41067+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41070+4 4 4 4 4 4
41071+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41072+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
41073+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
41074+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
41075+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
41076+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
41077+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
41078+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
41079+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
41080+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41081+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41084+4 4 4 4 4 4
41085+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41086+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
41087+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
41088+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
41089+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
41090+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
41091+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
41092+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
41093+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
41094+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41095+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41098+4 4 4 4 4 4
41099+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
41100+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
41101+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
41102+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
41103+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41104+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
41105+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
41106+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
41107+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
41108+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41109+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41112+4 4 4 4 4 4
41113+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41114+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
41115+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
41116+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
41117+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
41118+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
41119+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
41120+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
41121+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
41122+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41123+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41126+4 4 4 4 4 4
41127+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
41128+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
41129+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
41130+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
41131+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
41132+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
41133+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
41134+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
41135+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
41136+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
41137+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41140+4 4 4 4 4 4
41141+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
41142+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41143+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
41144+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
41145+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
41146+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
41147+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
41148+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
41149+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
41150+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
41151+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41154+4 4 4 4 4 4
41155+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
41156+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41157+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
41158+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
41159+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
41160+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
41161+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41162+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
41163+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
41164+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
41165+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41168+4 4 4 4 4 4
41169+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
41170+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
41171+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
41172+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
41173+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
41174+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
41175+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
41176+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
41177+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
41178+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
41179+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41182+4 4 4 4 4 4
41183+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
41184+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
41185+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41186+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
41187+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
41188+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
41189+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
41190+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
41191+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
41192+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
41193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41196+4 4 4 4 4 4
41197+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41198+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
41199+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
41200+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
41201+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
41202+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
41203+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
41204+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
41205+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
41206+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41210+4 4 4 4 4 4
41211+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
41212+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
41213+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
41214+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
41215+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
41216+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
41217+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
41218+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
41219+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
41220+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41224+4 4 4 4 4 4
41225+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
41226+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
41227+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
41228+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
41229+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
41230+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
41231+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
41232+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
41233+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41234+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41238+4 4 4 4 4 4
41239+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
41240+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41241+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
41242+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41243+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
41244+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
41245+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
41246+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
41247+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
41248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41252+4 4 4 4 4 4
41253+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
41254+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
41255+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
41256+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
41257+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
41258+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
41259+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
41260+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
41261+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
41262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41266+4 4 4 4 4 4
41267+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41268+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
41269+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
41270+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
41271+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
41272+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
41273+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
41274+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
41275+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41280+4 4 4 4 4 4
41281+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
41282+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
41283+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41284+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
41285+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
41286+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
41287+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
41288+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
41289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41294+4 4 4 4 4 4
41295+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41296+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
41297+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
41298+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
41299+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
41300+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
41301+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
41302+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41308+4 4 4 4 4 4
41309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41310+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
41311+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41312+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
41313+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
41314+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
41315+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
41316+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
41317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41322+4 4 4 4 4 4
41323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41324+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
41325+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
41326+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
41327+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
41328+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
41329+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
41330+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
41331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41336+4 4 4 4 4 4
41337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41338+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41339+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
41340+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41341+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
41342+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
41343+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
41344+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41350+4 4 4 4 4 4
41351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41353+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41354+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
41355+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
41356+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
41357+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
41358+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41364+4 4 4 4 4 4
41365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41367+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41368+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41369+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
41370+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
41371+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
41372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41375+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41378+4 4 4 4 4 4
41379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41381+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41382+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41383+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41384+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
41385+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
41386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41389+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41392+4 4 4 4 4 4
41393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41396+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41397+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41398+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41399+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
41400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41406+4 4 4 4 4 4
41407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41410+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
41411+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
41412+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
41413+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
41414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41420+4 4 4 4 4 4
41421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41425+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
41426+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41427+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41434+4 4 4 4 4 4
41435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41439+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
41440+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
41441+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41448+4 4 4 4 4 4
41449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41453+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
41454+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
41455+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41462+4 4 4 4 4 4
41463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41467+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
41468+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
41469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41476+4 4 4 4 4 4
41477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41481+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41482+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
41483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41490+4 4 4 4 4 4
41491diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
41492index a40c05e..785c583 100644
41493--- a/drivers/video/udlfb.c
41494+++ b/drivers/video/udlfb.c
41495@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
41496 dlfb_urb_completion(urb);
41497
41498 error:
41499- atomic_add(bytes_sent, &dev->bytes_sent);
41500- atomic_add(bytes_identical, &dev->bytes_identical);
41501- atomic_add(width*height*2, &dev->bytes_rendered);
41502+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41503+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41504+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
41505 end_cycles = get_cycles();
41506- atomic_add(((unsigned int) ((end_cycles - start_cycles)
41507+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41508 >> 10)), /* Kcycles */
41509 &dev->cpu_kcycles_used);
41510
41511@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41512 dlfb_urb_completion(urb);
41513
41514 error:
41515- atomic_add(bytes_sent, &dev->bytes_sent);
41516- atomic_add(bytes_identical, &dev->bytes_identical);
41517- atomic_add(bytes_rendered, &dev->bytes_rendered);
41518+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41519+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41520+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41521 end_cycles = get_cycles();
41522- atomic_add(((unsigned int) ((end_cycles - start_cycles)
41523+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41524 >> 10)), /* Kcycles */
41525 &dev->cpu_kcycles_used);
41526 }
41527@@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41528 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41529 struct dlfb_data *dev = fb_info->par;
41530 return snprintf(buf, PAGE_SIZE, "%u\n",
41531- atomic_read(&dev->bytes_rendered));
41532+ atomic_read_unchecked(&dev->bytes_rendered));
41533 }
41534
41535 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41536@@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41537 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41538 struct dlfb_data *dev = fb_info->par;
41539 return snprintf(buf, PAGE_SIZE, "%u\n",
41540- atomic_read(&dev->bytes_identical));
41541+ atomic_read_unchecked(&dev->bytes_identical));
41542 }
41543
41544 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41545@@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41546 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41547 struct dlfb_data *dev = fb_info->par;
41548 return snprintf(buf, PAGE_SIZE, "%u\n",
41549- atomic_read(&dev->bytes_sent));
41550+ atomic_read_unchecked(&dev->bytes_sent));
41551 }
41552
41553 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41554@@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41555 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41556 struct dlfb_data *dev = fb_info->par;
41557 return snprintf(buf, PAGE_SIZE, "%u\n",
41558- atomic_read(&dev->cpu_kcycles_used));
41559+ atomic_read_unchecked(&dev->cpu_kcycles_used));
41560 }
41561
41562 static ssize_t edid_show(
41563@@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41564 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41565 struct dlfb_data *dev = fb_info->par;
41566
41567- atomic_set(&dev->bytes_rendered, 0);
41568- atomic_set(&dev->bytes_identical, 0);
41569- atomic_set(&dev->bytes_sent, 0);
41570- atomic_set(&dev->cpu_kcycles_used, 0);
41571+ atomic_set_unchecked(&dev->bytes_rendered, 0);
41572+ atomic_set_unchecked(&dev->bytes_identical, 0);
41573+ atomic_set_unchecked(&dev->bytes_sent, 0);
41574+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41575
41576 return count;
41577 }
41578diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41579index e7f69ef..83af4fd 100644
41580--- a/drivers/video/uvesafb.c
41581+++ b/drivers/video/uvesafb.c
41582@@ -19,6 +19,7 @@
41583 #include <linux/io.h>
41584 #include <linux/mutex.h>
41585 #include <linux/slab.h>
41586+#include <linux/moduleloader.h>
41587 #include <video/edid.h>
41588 #include <video/uvesafb.h>
41589 #ifdef CONFIG_X86
41590@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
41591 NULL,
41592 };
41593
41594- return call_usermodehelper(v86d_path, argv, envp, 1);
41595+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
41596 }
41597
41598 /*
41599@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41600 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41601 par->pmi_setpal = par->ypan = 0;
41602 } else {
41603+
41604+#ifdef CONFIG_PAX_KERNEXEC
41605+#ifdef CONFIG_MODULES
41606+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41607+#endif
41608+ if (!par->pmi_code) {
41609+ par->pmi_setpal = par->ypan = 0;
41610+ return 0;
41611+ }
41612+#endif
41613+
41614 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41615 + task->t.regs.edi);
41616+
41617+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41618+ pax_open_kernel();
41619+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41620+ pax_close_kernel();
41621+
41622+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41623+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41624+#else
41625 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41626 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41627+#endif
41628+
41629 printk(KERN_INFO "uvesafb: protected mode interface info at "
41630 "%04x:%04x\n",
41631 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41632@@ -1821,6 +1844,11 @@ out:
41633 if (par->vbe_modes)
41634 kfree(par->vbe_modes);
41635
41636+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41637+ if (par->pmi_code)
41638+ module_free_exec(NULL, par->pmi_code);
41639+#endif
41640+
41641 framebuffer_release(info);
41642 return err;
41643 }
41644@@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platform_device *dev)
41645 kfree(par->vbe_state_orig);
41646 if (par->vbe_state_saved)
41647 kfree(par->vbe_state_saved);
41648+
41649+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41650+ if (par->pmi_code)
41651+ module_free_exec(NULL, par->pmi_code);
41652+#endif
41653+
41654 }
41655
41656 framebuffer_release(info);
41657diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41658index 501b340..86bd4cf 100644
41659--- a/drivers/video/vesafb.c
41660+++ b/drivers/video/vesafb.c
41661@@ -9,6 +9,7 @@
41662 */
41663
41664 #include <linux/module.h>
41665+#include <linux/moduleloader.h>
41666 #include <linux/kernel.h>
41667 #include <linux/errno.h>
41668 #include <linux/string.h>
41669@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41670 static int vram_total __initdata; /* Set total amount of memory */
41671 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41672 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41673-static void (*pmi_start)(void) __read_mostly;
41674-static void (*pmi_pal) (void) __read_mostly;
41675+static void (*pmi_start)(void) __read_only;
41676+static void (*pmi_pal) (void) __read_only;
41677 static int depth __read_mostly;
41678 static int vga_compat __read_mostly;
41679 /* --------------------------------------------------------------------- */
41680@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41681 unsigned int size_vmode;
41682 unsigned int size_remap;
41683 unsigned int size_total;
41684+ void *pmi_code = NULL;
41685
41686 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41687 return -ENODEV;
41688@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41689 size_remap = size_total;
41690 vesafb_fix.smem_len = size_remap;
41691
41692-#ifndef __i386__
41693- screen_info.vesapm_seg = 0;
41694-#endif
41695-
41696 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41697 printk(KERN_WARNING
41698 "vesafb: cannot reserve video memory at 0x%lx\n",
41699@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41700 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41701 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41702
41703+#ifdef __i386__
41704+
41705+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41706+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
41707+ if (!pmi_code)
41708+#elif !defined(CONFIG_PAX_KERNEXEC)
41709+ if (0)
41710+#endif
41711+
41712+#endif
41713+ screen_info.vesapm_seg = 0;
41714+
41715 if (screen_info.vesapm_seg) {
41716- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41717- screen_info.vesapm_seg,screen_info.vesapm_off);
41718+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41719+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41720 }
41721
41722 if (screen_info.vesapm_seg < 0xc000)
41723@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41724
41725 if (ypan || pmi_setpal) {
41726 unsigned short *pmi_base;
41727+
41728 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41729- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41730- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41731+
41732+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41733+ pax_open_kernel();
41734+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41735+#else
41736+ pmi_code = pmi_base;
41737+#endif
41738+
41739+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41740+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41741+
41742+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41743+ pmi_start = ktva_ktla(pmi_start);
41744+ pmi_pal = ktva_ktla(pmi_pal);
41745+ pax_close_kernel();
41746+#endif
41747+
41748 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41749 if (pmi_base[3]) {
41750 printk(KERN_INFO "vesafb: pmi: ports = ");
41751@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41752 info->node, info->fix.id);
41753 return 0;
41754 err:
41755+
41756+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41757+ module_free_exec(NULL, pmi_code);
41758+#endif
41759+
41760 if (info->screen_base)
41761 iounmap(info->screen_base);
41762 framebuffer_release(info);
41763diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41764index 88714ae..16c2e11 100644
41765--- a/drivers/video/via/via_clock.h
41766+++ b/drivers/video/via/via_clock.h
41767@@ -56,7 +56,7 @@ struct via_clock {
41768
41769 void (*set_engine_pll_state)(u8 state);
41770 void (*set_engine_pll)(struct via_pll_config config);
41771-};
41772+} __no_const;
41773
41774
41775 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41776diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41777index e56c934..fc22f4b 100644
41778--- a/drivers/xen/xen-pciback/conf_space.h
41779+++ b/drivers/xen/xen-pciback/conf_space.h
41780@@ -44,15 +44,15 @@ struct config_field {
41781 struct {
41782 conf_dword_write write;
41783 conf_dword_read read;
41784- } dw;
41785+ } __no_const dw;
41786 struct {
41787 conf_word_write write;
41788 conf_word_read read;
41789- } w;
41790+ } __no_const w;
41791 struct {
41792 conf_byte_write write;
41793 conf_byte_read read;
41794- } b;
41795+ } __no_const b;
41796 } u;
41797 struct list_head list;
41798 };
41799diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41800index 014c8dd..6f3dfe6 100644
41801--- a/fs/9p/vfs_inode.c
41802+++ b/fs/9p/vfs_inode.c
41803@@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41804 void
41805 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41806 {
41807- char *s = nd_get_link(nd);
41808+ const char *s = nd_get_link(nd);
41809
41810 p9_debug(P9_DEBUG_VFS, " %s %s\n",
41811 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
41812diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41813index e95d1b6..3454244 100644
41814--- a/fs/Kconfig.binfmt
41815+++ b/fs/Kconfig.binfmt
41816@@ -89,7 +89,7 @@ config HAVE_AOUT
41817
41818 config BINFMT_AOUT
41819 tristate "Kernel support for a.out and ECOFF binaries"
41820- depends on HAVE_AOUT
41821+ depends on HAVE_AOUT && BROKEN
41822 ---help---
41823 A.out (Assembler.OUTput) is a set of formats for libraries and
41824 executables used in the earliest versions of UNIX. Linux used
41825diff --git a/fs/aio.c b/fs/aio.c
41826index b9d64d8..86cb1d5 100644
41827--- a/fs/aio.c
41828+++ b/fs/aio.c
41829@@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41830 size += sizeof(struct io_event) * nr_events;
41831 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41832
41833- if (nr_pages < 0)
41834+ if (nr_pages <= 0)
41835 return -EINVAL;
41836
41837 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41838@@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41839 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41840 {
41841 ssize_t ret;
41842+ struct iovec iovstack;
41843
41844 #ifdef CONFIG_COMPAT
41845 if (compat)
41846 ret = compat_rw_copy_check_uvector(type,
41847 (struct compat_iovec __user *)kiocb->ki_buf,
41848- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41849+ kiocb->ki_nbytes, 1, &iovstack,
41850 &kiocb->ki_iovec, 1);
41851 else
41852 #endif
41853 ret = rw_copy_check_uvector(type,
41854 (struct iovec __user *)kiocb->ki_buf,
41855- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41856+ kiocb->ki_nbytes, 1, &iovstack,
41857 &kiocb->ki_iovec, 1);
41858 if (ret < 0)
41859 goto out;
41860
41861+ if (kiocb->ki_iovec == &iovstack) {
41862+ kiocb->ki_inline_vec = iovstack;
41863+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
41864+ }
41865 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41866 kiocb->ki_cur_seg = 0;
41867 /* ki_nbytes/left now reflect bytes instead of segs */
41868diff --git a/fs/attr.c b/fs/attr.c
41869index 95053ad..2cc93ca 100644
41870--- a/fs/attr.c
41871+++ b/fs/attr.c
41872@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41873 unsigned long limit;
41874
41875 limit = rlimit(RLIMIT_FSIZE);
41876+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41877 if (limit != RLIM_INFINITY && offset > limit)
41878 goto out_sig;
41879 if (offset > inode->i_sb->s_maxbytes)
41880diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41881index 9c098db..c755da5 100644
41882--- a/fs/autofs4/waitq.c
41883+++ b/fs/autofs4/waitq.c
41884@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
41885 {
41886 unsigned long sigpipe, flags;
41887 mm_segment_t fs;
41888- const char *data = (const char *)addr;
41889+ const char __user *data = (const char __force_user *)addr;
41890 ssize_t wr = 0;
41891
41892 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
41893diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41894index 6e6d536..457113a 100644
41895--- a/fs/befs/linuxvfs.c
41896+++ b/fs/befs/linuxvfs.c
41897@@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41898 {
41899 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41900 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41901- char *link = nd_get_link(nd);
41902+ const char *link = nd_get_link(nd);
41903 if (!IS_ERR(link))
41904 kfree(link);
41905 }
41906diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41907index 1ff9405..f1e376a 100644
41908--- a/fs/binfmt_aout.c
41909+++ b/fs/binfmt_aout.c
41910@@ -16,6 +16,7 @@
41911 #include <linux/string.h>
41912 #include <linux/fs.h>
41913 #include <linux/file.h>
41914+#include <linux/security.h>
41915 #include <linux/stat.h>
41916 #include <linux/fcntl.h>
41917 #include <linux/ptrace.h>
41918@@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41919 #endif
41920 # define START_STACK(u) ((void __user *)u.start_stack)
41921
41922+ memset(&dump, 0, sizeof(dump));
41923+
41924 fs = get_fs();
41925 set_fs(KERNEL_DS);
41926 has_dumped = 1;
41927@@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41928
41929 /* If the size of the dump file exceeds the rlimit, then see what would happen
41930 if we wrote the stack, but not the data area. */
41931+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41932 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41933 dump.u_dsize = 0;
41934
41935 /* Make sure we have enough room to write the stack and data areas. */
41936+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41937 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41938 dump.u_ssize = 0;
41939
41940@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41941 rlim = rlimit(RLIMIT_DATA);
41942 if (rlim >= RLIM_INFINITY)
41943 rlim = ~0;
41944+
41945+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41946 if (ex.a_data + ex.a_bss > rlim)
41947 return -ENOMEM;
41948
41949@@ -269,6 +276,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41950 install_exec_creds(bprm);
41951 current->flags &= ~PF_FORKNOEXEC;
41952
41953+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41954+ current->mm->pax_flags = 0UL;
41955+#endif
41956+
41957+#ifdef CONFIG_PAX_PAGEEXEC
41958+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41959+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41960+
41961+#ifdef CONFIG_PAX_EMUTRAMP
41962+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41963+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41964+#endif
41965+
41966+#ifdef CONFIG_PAX_MPROTECT
41967+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41968+ current->mm->pax_flags |= MF_PAX_MPROTECT;
41969+#endif
41970+
41971+ }
41972+#endif
41973+
41974 if (N_MAGIC(ex) == OMAGIC) {
41975 unsigned long text_addr, map_size;
41976 loff_t pos;
41977@@ -341,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41978
41979 down_write(&current->mm->mmap_sem);
41980 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41981- PROT_READ | PROT_WRITE | PROT_EXEC,
41982+ PROT_READ | PROT_WRITE,
41983 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41984 fd_offset + ex.a_text);
41985 up_write(&current->mm->mmap_sem);
41986diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41987index 07d096c..851a18b 100644
41988--- a/fs/binfmt_elf.c
41989+++ b/fs/binfmt_elf.c
41990@@ -32,6 +32,7 @@
41991 #include <linux/elf.h>
41992 #include <linux/utsname.h>
41993 #include <linux/coredump.h>
41994+#include <linux/xattr.h>
41995 #include <asm/uaccess.h>
41996 #include <asm/param.h>
41997 #include <asm/page.h>
41998@@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41999 #define elf_core_dump NULL
42000 #endif
42001
42002+#ifdef CONFIG_PAX_MPROTECT
42003+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
42004+#endif
42005+
42006 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
42007 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
42008 #else
42009@@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
42010 .load_binary = load_elf_binary,
42011 .load_shlib = load_elf_library,
42012 .core_dump = elf_core_dump,
42013+
42014+#ifdef CONFIG_PAX_MPROTECT
42015+ .handle_mprotect= elf_handle_mprotect,
42016+#endif
42017+
42018 .min_coredump = ELF_EXEC_PAGESIZE,
42019 };
42020
42021@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
42022
42023 static int set_brk(unsigned long start, unsigned long end)
42024 {
42025+ unsigned long e = end;
42026+
42027 start = ELF_PAGEALIGN(start);
42028 end = ELF_PAGEALIGN(end);
42029 if (end > start) {
42030@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
42031 if (BAD_ADDR(addr))
42032 return addr;
42033 }
42034- current->mm->start_brk = current->mm->brk = end;
42035+ current->mm->start_brk = current->mm->brk = e;
42036 return 0;
42037 }
42038
42039@@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42040 elf_addr_t __user *u_rand_bytes;
42041 const char *k_platform = ELF_PLATFORM;
42042 const char *k_base_platform = ELF_BASE_PLATFORM;
42043- unsigned char k_rand_bytes[16];
42044+ u32 k_rand_bytes[4];
42045 int items;
42046 elf_addr_t *elf_info;
42047 int ei_index = 0;
42048 const struct cred *cred = current_cred();
42049 struct vm_area_struct *vma;
42050+ unsigned long saved_auxv[AT_VECTOR_SIZE];
42051
42052 /*
42053 * In some cases (e.g. Hyper-Threading), we want to avoid L1
42054@@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42055 * Generate 16 random bytes for userspace PRNG seeding.
42056 */
42057 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
42058- u_rand_bytes = (elf_addr_t __user *)
42059- STACK_ALLOC(p, sizeof(k_rand_bytes));
42060+ srandom32(k_rand_bytes[0] ^ random32());
42061+ srandom32(k_rand_bytes[1] ^ random32());
42062+ srandom32(k_rand_bytes[2] ^ random32());
42063+ srandom32(k_rand_bytes[3] ^ random32());
42064+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
42065+ u_rand_bytes = (elf_addr_t __user *) p;
42066 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
42067 return -EFAULT;
42068
42069@@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42070 return -EFAULT;
42071 current->mm->env_end = p;
42072
42073+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
42074+
42075 /* Put the elf_info on the stack in the right place. */
42076 sp = (elf_addr_t __user *)envp + 1;
42077- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
42078+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
42079 return -EFAULT;
42080 return 0;
42081 }
42082@@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42083 {
42084 struct elf_phdr *elf_phdata;
42085 struct elf_phdr *eppnt;
42086- unsigned long load_addr = 0;
42087+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
42088 int load_addr_set = 0;
42089 unsigned long last_bss = 0, elf_bss = 0;
42090- unsigned long error = ~0UL;
42091+ unsigned long error = -EINVAL;
42092 unsigned long total_size;
42093 int retval, i, size;
42094
42095@@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42096 goto out_close;
42097 }
42098
42099+#ifdef CONFIG_PAX_SEGMEXEC
42100+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
42101+ pax_task_size = SEGMEXEC_TASK_SIZE;
42102+#endif
42103+
42104 eppnt = elf_phdata;
42105 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
42106 if (eppnt->p_type == PT_LOAD) {
42107@@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42108 k = load_addr + eppnt->p_vaddr;
42109 if (BAD_ADDR(k) ||
42110 eppnt->p_filesz > eppnt->p_memsz ||
42111- eppnt->p_memsz > TASK_SIZE ||
42112- TASK_SIZE - eppnt->p_memsz < k) {
42113+ eppnt->p_memsz > pax_task_size ||
42114+ pax_task_size - eppnt->p_memsz < k) {
42115 error = -ENOMEM;
42116 goto out_close;
42117 }
42118@@ -528,6 +552,351 @@ out:
42119 return error;
42120 }
42121
42122+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42123+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
42124+{
42125+ unsigned long pax_flags = 0UL;
42126+
42127+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42128+
42129+#ifdef CONFIG_PAX_PAGEEXEC
42130+ if (elf_phdata->p_flags & PF_PAGEEXEC)
42131+ pax_flags |= MF_PAX_PAGEEXEC;
42132+#endif
42133+
42134+#ifdef CONFIG_PAX_SEGMEXEC
42135+ if (elf_phdata->p_flags & PF_SEGMEXEC)
42136+ pax_flags |= MF_PAX_SEGMEXEC;
42137+#endif
42138+
42139+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42140+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42141+ if ((__supported_pte_mask & _PAGE_NX))
42142+ pax_flags &= ~MF_PAX_SEGMEXEC;
42143+ else
42144+ pax_flags &= ~MF_PAX_PAGEEXEC;
42145+ }
42146+#endif
42147+
42148+#ifdef CONFIG_PAX_EMUTRAMP
42149+ if (elf_phdata->p_flags & PF_EMUTRAMP)
42150+ pax_flags |= MF_PAX_EMUTRAMP;
42151+#endif
42152+
42153+#ifdef CONFIG_PAX_MPROTECT
42154+ if (elf_phdata->p_flags & PF_MPROTECT)
42155+ pax_flags |= MF_PAX_MPROTECT;
42156+#endif
42157+
42158+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42159+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42160+ pax_flags |= MF_PAX_RANDMMAP;
42161+#endif
42162+
42163+#endif
42164+
42165+ return pax_flags;
42166+}
42167+
42168+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
42169+{
42170+ unsigned long pax_flags = 0UL;
42171+
42172+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42173+
42174+#ifdef CONFIG_PAX_PAGEEXEC
42175+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42176+ pax_flags |= MF_PAX_PAGEEXEC;
42177+#endif
42178+
42179+#ifdef CONFIG_PAX_SEGMEXEC
42180+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42181+ pax_flags |= MF_PAX_SEGMEXEC;
42182+#endif
42183+
42184+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42185+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42186+ if ((__supported_pte_mask & _PAGE_NX))
42187+ pax_flags &= ~MF_PAX_SEGMEXEC;
42188+ else
42189+ pax_flags &= ~MF_PAX_PAGEEXEC;
42190+ }
42191+#endif
42192+
42193+#ifdef CONFIG_PAX_EMUTRAMP
42194+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42195+ pax_flags |= MF_PAX_EMUTRAMP;
42196+#endif
42197+
42198+#ifdef CONFIG_PAX_MPROTECT
42199+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42200+ pax_flags |= MF_PAX_MPROTECT;
42201+#endif
42202+
42203+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42204+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42205+ pax_flags |= MF_PAX_RANDMMAP;
42206+#endif
42207+
42208+#endif
42209+
42210+ return pax_flags;
42211+}
42212+
42213+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
42214+{
42215+ unsigned long pax_flags = 0UL;
42216+
42217+#ifdef CONFIG_PAX_EI_PAX
42218+
42219+#ifdef CONFIG_PAX_PAGEEXEC
42220+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
42221+ pax_flags |= MF_PAX_PAGEEXEC;
42222+#endif
42223+
42224+#ifdef CONFIG_PAX_SEGMEXEC
42225+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
42226+ pax_flags |= MF_PAX_SEGMEXEC;
42227+#endif
42228+
42229+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42230+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42231+ if ((__supported_pte_mask & _PAGE_NX))
42232+ pax_flags &= ~MF_PAX_SEGMEXEC;
42233+ else
42234+ pax_flags &= ~MF_PAX_PAGEEXEC;
42235+ }
42236+#endif
42237+
42238+#ifdef CONFIG_PAX_EMUTRAMP
42239+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
42240+ pax_flags |= MF_PAX_EMUTRAMP;
42241+#endif
42242+
42243+#ifdef CONFIG_PAX_MPROTECT
42244+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
42245+ pax_flags |= MF_PAX_MPROTECT;
42246+#endif
42247+
42248+#ifdef CONFIG_PAX_ASLR
42249+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
42250+ pax_flags |= MF_PAX_RANDMMAP;
42251+#endif
42252+
42253+#else
42254+
42255+#ifdef CONFIG_PAX_PAGEEXEC
42256+ pax_flags |= MF_PAX_PAGEEXEC;
42257+#endif
42258+
42259+#ifdef CONFIG_PAX_MPROTECT
42260+ pax_flags |= MF_PAX_MPROTECT;
42261+#endif
42262+
42263+#ifdef CONFIG_PAX_RANDMMAP
42264+ pax_flags |= MF_PAX_RANDMMAP;
42265+#endif
42266+
42267+#ifdef CONFIG_PAX_SEGMEXEC
42268+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
42269+ pax_flags &= ~MF_PAX_PAGEEXEC;
42270+ pax_flags |= MF_PAX_SEGMEXEC;
42271+ }
42272+#endif
42273+
42274+#endif
42275+
42276+ return pax_flags;
42277+}
42278+
42279+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
42280+{
42281+
42282+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42283+ unsigned long i;
42284+
42285+ for (i = 0UL; i < elf_ex->e_phnum; i++)
42286+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
42287+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
42288+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
42289+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
42290+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
42291+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
42292+ return ~0UL;
42293+
42294+#ifdef CONFIG_PAX_SOFTMODE
42295+ if (pax_softmode)
42296+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
42297+ else
42298+#endif
42299+
42300+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
42301+ break;
42302+ }
42303+#endif
42304+
42305+ return ~0UL;
42306+}
42307+
42308+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42309+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
42310+{
42311+ unsigned long pax_flags = 0UL;
42312+
42313+#ifdef CONFIG_PAX_PAGEEXEC
42314+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
42315+ pax_flags |= MF_PAX_PAGEEXEC;
42316+#endif
42317+
42318+#ifdef CONFIG_PAX_SEGMEXEC
42319+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
42320+ pax_flags |= MF_PAX_SEGMEXEC;
42321+#endif
42322+
42323+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42324+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42325+ if ((__supported_pte_mask & _PAGE_NX))
42326+ pax_flags &= ~MF_PAX_SEGMEXEC;
42327+ else
42328+ pax_flags &= ~MF_PAX_PAGEEXEC;
42329+ }
42330+#endif
42331+
42332+#ifdef CONFIG_PAX_EMUTRAMP
42333+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
42334+ pax_flags |= MF_PAX_EMUTRAMP;
42335+#endif
42336+
42337+#ifdef CONFIG_PAX_MPROTECT
42338+ if (pax_flags_softmode & MF_PAX_MPROTECT)
42339+ pax_flags |= MF_PAX_MPROTECT;
42340+#endif
42341+
42342+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42343+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
42344+ pax_flags |= MF_PAX_RANDMMAP;
42345+#endif
42346+
42347+ return pax_flags;
42348+}
42349+
42350+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
42351+{
42352+ unsigned long pax_flags = 0UL;
42353+
42354+#ifdef CONFIG_PAX_PAGEEXEC
42355+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
42356+ pax_flags |= MF_PAX_PAGEEXEC;
42357+#endif
42358+
42359+#ifdef CONFIG_PAX_SEGMEXEC
42360+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
42361+ pax_flags |= MF_PAX_SEGMEXEC;
42362+#endif
42363+
42364+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42365+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42366+ if ((__supported_pte_mask & _PAGE_NX))
42367+ pax_flags &= ~MF_PAX_SEGMEXEC;
42368+ else
42369+ pax_flags &= ~MF_PAX_PAGEEXEC;
42370+ }
42371+#endif
42372+
42373+#ifdef CONFIG_PAX_EMUTRAMP
42374+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
42375+ pax_flags |= MF_PAX_EMUTRAMP;
42376+#endif
42377+
42378+#ifdef CONFIG_PAX_MPROTECT
42379+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
42380+ pax_flags |= MF_PAX_MPROTECT;
42381+#endif
42382+
42383+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42384+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
42385+ pax_flags |= MF_PAX_RANDMMAP;
42386+#endif
42387+
42388+ return pax_flags;
42389+}
42390+#endif
42391+
42392+static unsigned long pax_parse_xattr_pax(struct file * const file)
42393+{
42394+
42395+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42396+ ssize_t xattr_size, i;
42397+ unsigned char xattr_value[5];
42398+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
42399+
42400+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
42401+ if (xattr_size <= 0)
42402+ return ~0UL;
42403+
42404+ for (i = 0; i < xattr_size; i++)
42405+ switch (xattr_value[i]) {
42406+ default:
42407+ return ~0UL;
42408+
42409+#define parse_flag(option1, option2, flag) \
42410+ case option1: \
42411+ pax_flags_hardmode |= MF_PAX_##flag; \
42412+ break; \
42413+ case option2: \
42414+ pax_flags_softmode |= MF_PAX_##flag; \
42415+ break;
42416+
42417+ parse_flag('p', 'P', PAGEEXEC);
42418+ parse_flag('e', 'E', EMUTRAMP);
42419+ parse_flag('m', 'M', MPROTECT);
42420+ parse_flag('r', 'R', RANDMMAP);
42421+ parse_flag('s', 'S', SEGMEXEC);
42422+
42423+#undef parse_flag
42424+ }
42425+
42426+ if (pax_flags_hardmode & pax_flags_softmode)
42427+ return ~0UL;
42428+
42429+#ifdef CONFIG_PAX_SOFTMODE
42430+ if (pax_softmode)
42431+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
42432+ else
42433+#endif
42434+
42435+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
42436+#else
42437+ return ~0UL;
42438+#endif
42439+
42440+}
42441+
42442+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
42443+{
42444+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
42445+
42446+ pax_flags = pax_parse_ei_pax(elf_ex);
42447+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
42448+ xattr_pax_flags = pax_parse_xattr_pax(file);
42449+
42450+ if (pt_pax_flags == ~0UL)
42451+ pt_pax_flags = xattr_pax_flags;
42452+ else if (xattr_pax_flags == ~0UL)
42453+ xattr_pax_flags = pt_pax_flags;
42454+ if (pt_pax_flags != xattr_pax_flags)
42455+ return -EINVAL;
42456+ if (pt_pax_flags != ~0UL)
42457+ pax_flags = pt_pax_flags;
42458+
42459+ if (0 > pax_check_flags(&pax_flags))
42460+ return -EINVAL;
42461+
42462+ current->mm->pax_flags = pax_flags;
42463+ return 0;
42464+}
42465+#endif
42466+
42467 /*
42468 * These are the functions used to load ELF style executables and shared
42469 * libraries. There is no binary dependent code anywhere else.
42470@@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42471 {
42472 unsigned int random_variable = 0;
42473
42474+#ifdef CONFIG_PAX_RANDUSTACK
42475+ if (randomize_va_space)
42476+ return stack_top - current->mm->delta_stack;
42477+#endif
42478+
42479 if ((current->flags & PF_RANDOMIZE) &&
42480 !(current->personality & ADDR_NO_RANDOMIZE)) {
42481 random_variable = get_random_int() & STACK_RND_MASK;
42482@@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42483 unsigned long load_addr = 0, load_bias = 0;
42484 int load_addr_set = 0;
42485 char * elf_interpreter = NULL;
42486- unsigned long error;
42487+ unsigned long error = 0;
42488 struct elf_phdr *elf_ppnt, *elf_phdata;
42489 unsigned long elf_bss, elf_brk;
42490 int retval, i;
42491@@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42492 unsigned long start_code, end_code, start_data, end_data;
42493 unsigned long reloc_func_desc __maybe_unused = 0;
42494 int executable_stack = EXSTACK_DEFAULT;
42495- unsigned long def_flags = 0;
42496 struct {
42497 struct elfhdr elf_ex;
42498 struct elfhdr interp_elf_ex;
42499 } *loc;
42500+ unsigned long pax_task_size = TASK_SIZE;
42501
42502 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42503 if (!loc) {
42504@@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42505
42506 /* OK, This is the point of no return */
42507 current->flags &= ~PF_FORKNOEXEC;
42508- current->mm->def_flags = def_flags;
42509+
42510+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42511+ current->mm->pax_flags = 0UL;
42512+#endif
42513+
42514+#ifdef CONFIG_PAX_DLRESOLVE
42515+ current->mm->call_dl_resolve = 0UL;
42516+#endif
42517+
42518+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42519+ current->mm->call_syscall = 0UL;
42520+#endif
42521+
42522+#ifdef CONFIG_PAX_ASLR
42523+ current->mm->delta_mmap = 0UL;
42524+ current->mm->delta_stack = 0UL;
42525+#endif
42526+
42527+ current->mm->def_flags = 0;
42528+
42529+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42530+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
42531+ send_sig(SIGKILL, current, 0);
42532+ goto out_free_dentry;
42533+ }
42534+#endif
42535+
42536+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42537+ pax_set_initial_flags(bprm);
42538+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42539+ if (pax_set_initial_flags_func)
42540+ (pax_set_initial_flags_func)(bprm);
42541+#endif
42542+
42543+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42544+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42545+ current->mm->context.user_cs_limit = PAGE_SIZE;
42546+ current->mm->def_flags |= VM_PAGEEXEC;
42547+ }
42548+#endif
42549+
42550+#ifdef CONFIG_PAX_SEGMEXEC
42551+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42552+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42553+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42554+ pax_task_size = SEGMEXEC_TASK_SIZE;
42555+ current->mm->def_flags |= VM_NOHUGEPAGE;
42556+ }
42557+#endif
42558+
42559+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42560+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42561+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42562+ put_cpu();
42563+ }
42564+#endif
42565
42566 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42567 may depend on the personality. */
42568 SET_PERSONALITY(loc->elf_ex);
42569+
42570+#ifdef CONFIG_PAX_ASLR
42571+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42572+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42573+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42574+ }
42575+#endif
42576+
42577+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42578+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42579+ executable_stack = EXSTACK_DISABLE_X;
42580+ current->personality &= ~READ_IMPLIES_EXEC;
42581+ } else
42582+#endif
42583+
42584 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42585 current->personality |= READ_IMPLIES_EXEC;
42586
42587@@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42588 #else
42589 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42590 #endif
42591+
42592+#ifdef CONFIG_PAX_RANDMMAP
42593+ /* PaX: randomize base address at the default exe base if requested */
42594+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42595+#ifdef CONFIG_SPARC64
42596+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42597+#else
42598+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42599+#endif
42600+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42601+ elf_flags |= MAP_FIXED;
42602+ }
42603+#endif
42604+
42605 }
42606
42607 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42608@@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42609 * allowed task size. Note that p_filesz must always be
42610 * <= p_memsz so it is only necessary to check p_memsz.
42611 */
42612- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42613- elf_ppnt->p_memsz > TASK_SIZE ||
42614- TASK_SIZE - elf_ppnt->p_memsz < k) {
42615+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42616+ elf_ppnt->p_memsz > pax_task_size ||
42617+ pax_task_size - elf_ppnt->p_memsz < k) {
42618 /* set_brk can never work. Avoid overflows. */
42619 send_sig(SIGKILL, current, 0);
42620 retval = -EINVAL;
42621@@ -881,11 +1339,36 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42622 goto out_free_dentry;
42623 }
42624 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42625- send_sig(SIGSEGV, current, 0);
42626- retval = -EFAULT; /* Nobody gets to see this, but.. */
42627- goto out_free_dentry;
42628+ /*
42629+ * This bss-zeroing can fail if the ELF
42630+ * file specifies odd protections. So
42631+ * we don't check the return value
42632+ */
42633 }
42634
42635+#ifdef CONFIG_PAX_RANDMMAP
42636+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42637+ unsigned long start, size;
42638+
42639+ start = ELF_PAGEALIGN(elf_brk);
42640+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
42641+ down_write(&current->mm->mmap_sem);
42642+ retval = -ENOMEM;
42643+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
42644+ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
42645+ start = do_mmap(NULL, start, size, PROT_NONE, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
42646+ retval = IS_ERR_VALUE(start) ? start : 0;
42647+ }
42648+ up_write(&current->mm->mmap_sem);
42649+ if (retval == 0)
42650+ retval = set_brk(start + size, start + size + PAGE_SIZE);
42651+ if (retval < 0) {
42652+ send_sig(SIGKILL, current, 0);
42653+ goto out_free_dentry;
42654+ }
42655+ }
42656+#endif
42657+
42658 if (elf_interpreter) {
42659 unsigned long uninitialized_var(interp_map_addr);
42660
42661@@ -1098,7 +1581,7 @@ out:
42662 * Decide what to dump of a segment, part, all or none.
42663 */
42664 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42665- unsigned long mm_flags)
42666+ unsigned long mm_flags, long signr)
42667 {
42668 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42669
42670@@ -1132,7 +1615,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42671 if (vma->vm_file == NULL)
42672 return 0;
42673
42674- if (FILTER(MAPPED_PRIVATE))
42675+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42676 goto whole;
42677
42678 /*
42679@@ -1354,9 +1837,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42680 {
42681 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42682 int i = 0;
42683- do
42684+ do {
42685 i += 2;
42686- while (auxv[i - 2] != AT_NULL);
42687+ } while (auxv[i - 2] != AT_NULL);
42688 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42689 }
42690
42691@@ -1862,14 +2345,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42692 }
42693
42694 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42695- unsigned long mm_flags)
42696+ struct coredump_params *cprm)
42697 {
42698 struct vm_area_struct *vma;
42699 size_t size = 0;
42700
42701 for (vma = first_vma(current, gate_vma); vma != NULL;
42702 vma = next_vma(vma, gate_vma))
42703- size += vma_dump_size(vma, mm_flags);
42704+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42705 return size;
42706 }
42707
42708@@ -1963,7 +2446,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42709
42710 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42711
42712- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42713+ offset += elf_core_vma_data_size(gate_vma, cprm);
42714 offset += elf_core_extra_data_size();
42715 e_shoff = offset;
42716
42717@@ -1977,10 +2460,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42718 offset = dataoff;
42719
42720 size += sizeof(*elf);
42721+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42722 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42723 goto end_coredump;
42724
42725 size += sizeof(*phdr4note);
42726+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42727 if (size > cprm->limit
42728 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42729 goto end_coredump;
42730@@ -1994,7 +2479,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42731 phdr.p_offset = offset;
42732 phdr.p_vaddr = vma->vm_start;
42733 phdr.p_paddr = 0;
42734- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42735+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42736 phdr.p_memsz = vma->vm_end - vma->vm_start;
42737 offset += phdr.p_filesz;
42738 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42739@@ -2005,6 +2490,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42740 phdr.p_align = ELF_EXEC_PAGESIZE;
42741
42742 size += sizeof(phdr);
42743+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42744 if (size > cprm->limit
42745 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42746 goto end_coredump;
42747@@ -2029,7 +2515,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42748 unsigned long addr;
42749 unsigned long end;
42750
42751- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42752+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42753
42754 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42755 struct page *page;
42756@@ -2038,6 +2524,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42757 page = get_dump_page(addr);
42758 if (page) {
42759 void *kaddr = kmap(page);
42760+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42761 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42762 !dump_write(cprm->file, kaddr,
42763 PAGE_SIZE);
42764@@ -2055,6 +2542,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42765
42766 if (e_phnum == PN_XNUM) {
42767 size += sizeof(*shdr4extnum);
42768+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42769 if (size > cprm->limit
42770 || !dump_write(cprm->file, shdr4extnum,
42771 sizeof(*shdr4extnum)))
42772@@ -2075,6 +2563,97 @@ out:
42773
42774 #endif /* CONFIG_ELF_CORE */
42775
42776+#ifdef CONFIG_PAX_MPROTECT
42777+/* PaX: non-PIC ELF libraries need relocations on their executable segments
42778+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42779+ * we'll remove VM_MAYWRITE for good on RELRO segments.
42780+ *
42781+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42782+ * basis because we want to allow the common case and not the special ones.
42783+ */
42784+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42785+{
42786+ struct elfhdr elf_h;
42787+ struct elf_phdr elf_p;
42788+ unsigned long i;
42789+ unsigned long oldflags;
42790+ bool is_textrel_rw, is_textrel_rx, is_relro;
42791+
42792+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42793+ return;
42794+
42795+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42796+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42797+
42798+#ifdef CONFIG_PAX_ELFRELOCS
42799+ /* possible TEXTREL */
42800+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42801+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42802+#else
42803+ is_textrel_rw = false;
42804+ is_textrel_rx = false;
42805+#endif
42806+
42807+ /* possible RELRO */
42808+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42809+
42810+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42811+ return;
42812+
42813+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42814+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42815+
42816+#ifdef CONFIG_PAX_ETEXECRELOCS
42817+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42818+#else
42819+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42820+#endif
42821+
42822+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42823+ !elf_check_arch(&elf_h) ||
42824+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42825+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42826+ return;
42827+
42828+ for (i = 0UL; i < elf_h.e_phnum; i++) {
42829+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42830+ return;
42831+ switch (elf_p.p_type) {
42832+ case PT_DYNAMIC:
42833+ if (!is_textrel_rw && !is_textrel_rx)
42834+ continue;
42835+ i = 0UL;
42836+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42837+ elf_dyn dyn;
42838+
42839+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42840+ return;
42841+ if (dyn.d_tag == DT_NULL)
42842+ return;
42843+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42844+ gr_log_textrel(vma);
42845+ if (is_textrel_rw)
42846+ vma->vm_flags |= VM_MAYWRITE;
42847+ else
42848+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42849+ vma->vm_flags &= ~VM_MAYWRITE;
42850+ return;
42851+ }
42852+ i++;
42853+ }
42854+ return;
42855+
42856+ case PT_GNU_RELRO:
42857+ if (!is_relro)
42858+ continue;
42859+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42860+ vma->vm_flags &= ~VM_MAYWRITE;
42861+ return;
42862+ }
42863+ }
42864+}
42865+#endif
42866+
42867 static int __init init_elf_binfmt(void)
42868 {
42869 return register_binfmt(&elf_format);
42870diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42871index 1bffbe0..c8c283e 100644
42872--- a/fs/binfmt_flat.c
42873+++ b/fs/binfmt_flat.c
42874@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42875 realdatastart = (unsigned long) -ENOMEM;
42876 printk("Unable to allocate RAM for process data, errno %d\n",
42877 (int)-realdatastart);
42878+ down_write(&current->mm->mmap_sem);
42879 do_munmap(current->mm, textpos, text_len);
42880+ up_write(&current->mm->mmap_sem);
42881 ret = realdatastart;
42882 goto err;
42883 }
42884@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42885 }
42886 if (IS_ERR_VALUE(result)) {
42887 printk("Unable to read data+bss, errno %d\n", (int)-result);
42888+ down_write(&current->mm->mmap_sem);
42889 do_munmap(current->mm, textpos, text_len);
42890 do_munmap(current->mm, realdatastart, len);
42891+ up_write(&current->mm->mmap_sem);
42892 ret = result;
42893 goto err;
42894 }
42895@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42896 }
42897 if (IS_ERR_VALUE(result)) {
42898 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42899+ down_write(&current->mm->mmap_sem);
42900 do_munmap(current->mm, textpos, text_len + data_len + extra +
42901 MAX_SHARED_LIBS * sizeof(unsigned long));
42902+ up_write(&current->mm->mmap_sem);
42903 ret = result;
42904 goto err;
42905 }
42906diff --git a/fs/bio.c b/fs/bio.c
42907index b980ecd..74800bf 100644
42908--- a/fs/bio.c
42909+++ b/fs/bio.c
42910@@ -833,7 +833,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
42911 /*
42912 * Overflow, abort
42913 */
42914- if (end < start)
42915+ if (end < start || end - start > INT_MAX - nr_pages)
42916 return ERR_PTR(-EINVAL);
42917
42918 nr_pages += end - start;
42919@@ -1229,7 +1229,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42920 const int read = bio_data_dir(bio) == READ;
42921 struct bio_map_data *bmd = bio->bi_private;
42922 int i;
42923- char *p = bmd->sgvecs[0].iov_base;
42924+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42925
42926 __bio_for_each_segment(bvec, bio, i, 0) {
42927 char *addr = page_address(bvec->bv_page);
42928diff --git a/fs/block_dev.c b/fs/block_dev.c
42929index 5e9f198..6bf9b1c 100644
42930--- a/fs/block_dev.c
42931+++ b/fs/block_dev.c
42932@@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42933 else if (bdev->bd_contains == bdev)
42934 return true; /* is a whole device which isn't held */
42935
42936- else if (whole->bd_holder == bd_may_claim)
42937+ else if (whole->bd_holder == (void *)bd_may_claim)
42938 return true; /* is a partition of a device that is being partitioned */
42939 else if (whole->bd_holder != NULL)
42940 return false; /* is a partition of a held device */
42941diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
42942index d986824..af1befd 100644
42943--- a/fs/btrfs/check-integrity.c
42944+++ b/fs/btrfs/check-integrity.c
42945@@ -157,7 +157,7 @@ struct btrfsic_block {
42946 union {
42947 bio_end_io_t *bio;
42948 bh_end_io_t *bh;
42949- } orig_bio_bh_end_io;
42950+ } __no_const orig_bio_bh_end_io;
42951 int submit_bio_bh_rw;
42952 u64 flush_gen; /* only valid if !never_written */
42953 };
42954diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42955index 0639a55..7d9e07f 100644
42956--- a/fs/btrfs/ctree.c
42957+++ b/fs/btrfs/ctree.c
42958@@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42959 free_extent_buffer(buf);
42960 add_root_to_dirty_list(root);
42961 } else {
42962- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42963- parent_start = parent->start;
42964- else
42965+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42966+ if (parent)
42967+ parent_start = parent->start;
42968+ else
42969+ parent_start = 0;
42970+ } else
42971 parent_start = 0;
42972
42973 WARN_ON(trans->transid != btrfs_header_generation(parent));
42974diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42975index 892b347..b3db246 100644
42976--- a/fs/btrfs/inode.c
42977+++ b/fs/btrfs/inode.c
42978@@ -6930,7 +6930,7 @@ fail:
42979 return -ENOMEM;
42980 }
42981
42982-static int btrfs_getattr(struct vfsmount *mnt,
42983+int btrfs_getattr(struct vfsmount *mnt,
42984 struct dentry *dentry, struct kstat *stat)
42985 {
42986 struct inode *inode = dentry->d_inode;
42987@@ -6944,6 +6944,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42988 return 0;
42989 }
42990
42991+EXPORT_SYMBOL(btrfs_getattr);
42992+
42993+dev_t get_btrfs_dev_from_inode(struct inode *inode)
42994+{
42995+ return BTRFS_I(inode)->root->anon_dev;
42996+}
42997+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42998+
42999 /*
43000 * If a file is moved, it will inherit the cow and compression flags of the new
43001 * directory.
43002diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
43003index d8b5471..e5463d7 100644
43004--- a/fs/btrfs/ioctl.c
43005+++ b/fs/btrfs/ioctl.c
43006@@ -2783,9 +2783,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43007 for (i = 0; i < num_types; i++) {
43008 struct btrfs_space_info *tmp;
43009
43010+ /* Don't copy in more than we allocated */
43011 if (!slot_count)
43012 break;
43013
43014+ slot_count--;
43015+
43016 info = NULL;
43017 rcu_read_lock();
43018 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
43019@@ -2807,15 +2810,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43020 memcpy(dest, &space, sizeof(space));
43021 dest++;
43022 space_args.total_spaces++;
43023- slot_count--;
43024 }
43025- if (!slot_count)
43026- break;
43027 }
43028 up_read(&info->groups_sem);
43029 }
43030
43031- user_dest = (struct btrfs_ioctl_space_info *)
43032+ user_dest = (struct btrfs_ioctl_space_info __user *)
43033 (arg + sizeof(struct btrfs_ioctl_space_args));
43034
43035 if (copy_to_user(user_dest, dest_orig, alloc_size))
43036diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
43037index 8c1aae2..1e46446 100644
43038--- a/fs/btrfs/relocation.c
43039+++ b/fs/btrfs/relocation.c
43040@@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
43041 }
43042 spin_unlock(&rc->reloc_root_tree.lock);
43043
43044- BUG_ON((struct btrfs_root *)node->data != root);
43045+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
43046
43047 if (!del) {
43048 spin_lock(&rc->reloc_root_tree.lock);
43049diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
43050index 622f469..e8d2d55 100644
43051--- a/fs/cachefiles/bind.c
43052+++ b/fs/cachefiles/bind.c
43053@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
43054 args);
43055
43056 /* start by checking things over */
43057- ASSERT(cache->fstop_percent >= 0 &&
43058- cache->fstop_percent < cache->fcull_percent &&
43059+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
43060 cache->fcull_percent < cache->frun_percent &&
43061 cache->frun_percent < 100);
43062
43063- ASSERT(cache->bstop_percent >= 0 &&
43064- cache->bstop_percent < cache->bcull_percent &&
43065+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
43066 cache->bcull_percent < cache->brun_percent &&
43067 cache->brun_percent < 100);
43068
43069diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
43070index 0a1467b..6a53245 100644
43071--- a/fs/cachefiles/daemon.c
43072+++ b/fs/cachefiles/daemon.c
43073@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
43074 if (n > buflen)
43075 return -EMSGSIZE;
43076
43077- if (copy_to_user(_buffer, buffer, n) != 0)
43078+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
43079 return -EFAULT;
43080
43081 return n;
43082@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
43083 if (test_bit(CACHEFILES_DEAD, &cache->flags))
43084 return -EIO;
43085
43086- if (datalen < 0 || datalen > PAGE_SIZE - 1)
43087+ if (datalen > PAGE_SIZE - 1)
43088 return -EOPNOTSUPP;
43089
43090 /* drag the command string into the kernel so we can parse it */
43091@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
43092 if (args[0] != '%' || args[1] != '\0')
43093 return -EINVAL;
43094
43095- if (fstop < 0 || fstop >= cache->fcull_percent)
43096+ if (fstop >= cache->fcull_percent)
43097 return cachefiles_daemon_range_error(cache, args);
43098
43099 cache->fstop_percent = fstop;
43100@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
43101 if (args[0] != '%' || args[1] != '\0')
43102 return -EINVAL;
43103
43104- if (bstop < 0 || bstop >= cache->bcull_percent)
43105+ if (bstop >= cache->bcull_percent)
43106 return cachefiles_daemon_range_error(cache, args);
43107
43108 cache->bstop_percent = bstop;
43109diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
43110index bd6bc1b..b627b53 100644
43111--- a/fs/cachefiles/internal.h
43112+++ b/fs/cachefiles/internal.h
43113@@ -57,7 +57,7 @@ struct cachefiles_cache {
43114 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
43115 struct rb_root active_nodes; /* active nodes (can't be culled) */
43116 rwlock_t active_lock; /* lock for active_nodes */
43117- atomic_t gravecounter; /* graveyard uniquifier */
43118+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
43119 unsigned frun_percent; /* when to stop culling (% files) */
43120 unsigned fcull_percent; /* when to start culling (% files) */
43121 unsigned fstop_percent; /* when to stop allocating (% files) */
43122@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
43123 * proc.c
43124 */
43125 #ifdef CONFIG_CACHEFILES_HISTOGRAM
43126-extern atomic_t cachefiles_lookup_histogram[HZ];
43127-extern atomic_t cachefiles_mkdir_histogram[HZ];
43128-extern atomic_t cachefiles_create_histogram[HZ];
43129+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43130+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43131+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
43132
43133 extern int __init cachefiles_proc_init(void);
43134 extern void cachefiles_proc_cleanup(void);
43135 static inline
43136-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
43137+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
43138 {
43139 unsigned long jif = jiffies - start_jif;
43140 if (jif >= HZ)
43141 jif = HZ - 1;
43142- atomic_inc(&histogram[jif]);
43143+ atomic_inc_unchecked(&histogram[jif]);
43144 }
43145
43146 #else
43147diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
43148index a0358c2..d6137f2 100644
43149--- a/fs/cachefiles/namei.c
43150+++ b/fs/cachefiles/namei.c
43151@@ -318,7 +318,7 @@ try_again:
43152 /* first step is to make up a grave dentry in the graveyard */
43153 sprintf(nbuffer, "%08x%08x",
43154 (uint32_t) get_seconds(),
43155- (uint32_t) atomic_inc_return(&cache->gravecounter));
43156+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
43157
43158 /* do the multiway lock magic */
43159 trap = lock_rename(cache->graveyard, dir);
43160diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
43161index eccd339..4c1d995 100644
43162--- a/fs/cachefiles/proc.c
43163+++ b/fs/cachefiles/proc.c
43164@@ -14,9 +14,9 @@
43165 #include <linux/seq_file.h>
43166 #include "internal.h"
43167
43168-atomic_t cachefiles_lookup_histogram[HZ];
43169-atomic_t cachefiles_mkdir_histogram[HZ];
43170-atomic_t cachefiles_create_histogram[HZ];
43171+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43172+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43173+atomic_unchecked_t cachefiles_create_histogram[HZ];
43174
43175 /*
43176 * display the latency histogram
43177@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
43178 return 0;
43179 default:
43180 index = (unsigned long) v - 3;
43181- x = atomic_read(&cachefiles_lookup_histogram[index]);
43182- y = atomic_read(&cachefiles_mkdir_histogram[index]);
43183- z = atomic_read(&cachefiles_create_histogram[index]);
43184+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
43185+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
43186+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
43187 if (x == 0 && y == 0 && z == 0)
43188 return 0;
43189
43190diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
43191index 0e3c092..818480e 100644
43192--- a/fs/cachefiles/rdwr.c
43193+++ b/fs/cachefiles/rdwr.c
43194@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
43195 old_fs = get_fs();
43196 set_fs(KERNEL_DS);
43197 ret = file->f_op->write(
43198- file, (const void __user *) data, len, &pos);
43199+ file, (const void __force_user *) data, len, &pos);
43200 set_fs(old_fs);
43201 kunmap(page);
43202 if (ret != len)
43203diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
43204index 3e8094b..cb3ff3d 100644
43205--- a/fs/ceph/dir.c
43206+++ b/fs/ceph/dir.c
43207@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
43208 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
43209 struct ceph_mds_client *mdsc = fsc->mdsc;
43210 unsigned frag = fpos_frag(filp->f_pos);
43211- int off = fpos_off(filp->f_pos);
43212+ unsigned int off = fpos_off(filp->f_pos);
43213 int err;
43214 u32 ftype;
43215 struct ceph_mds_reply_info_parsed *rinfo;
43216@@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
43217 if (nd &&
43218 (nd->flags & LOOKUP_OPEN) &&
43219 !(nd->intent.open.flags & O_CREAT)) {
43220- int mode = nd->intent.open.create_mode & ~current->fs->umask;
43221+ int mode = nd->intent.open.create_mode & ~current_umask();
43222 return ceph_lookup_open(dir, dentry, nd, mode, 1);
43223 }
43224
43225diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
43226index cfd1ce3..6b13a74 100644
43227--- a/fs/cifs/asn1.c
43228+++ b/fs/cifs/asn1.c
43229@@ -416,6 +416,9 @@ asn1_subid_decode(struct asn1_ctx *ctx, unsigned long *subid)
43230
43231 static int
43232 asn1_oid_decode(struct asn1_ctx *ctx,
43233+ unsigned char *eoc, unsigned long **oid, unsigned int *len) __size_overflow(2);
43234+static int
43235+asn1_oid_decode(struct asn1_ctx *ctx,
43236 unsigned char *eoc, unsigned long **oid, unsigned int *len)
43237 {
43238 unsigned long subid;
43239diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
43240index 24b3dfc..3cd5454 100644
43241--- a/fs/cifs/cifs_debug.c
43242+++ b/fs/cifs/cifs_debug.c
43243@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43244
43245 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
43246 #ifdef CONFIG_CIFS_STATS2
43247- atomic_set(&totBufAllocCount, 0);
43248- atomic_set(&totSmBufAllocCount, 0);
43249+ atomic_set_unchecked(&totBufAllocCount, 0);
43250+ atomic_set_unchecked(&totSmBufAllocCount, 0);
43251 #endif /* CONFIG_CIFS_STATS2 */
43252 spin_lock(&cifs_tcp_ses_lock);
43253 list_for_each(tmp1, &cifs_tcp_ses_list) {
43254@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43255 tcon = list_entry(tmp3,
43256 struct cifs_tcon,
43257 tcon_list);
43258- atomic_set(&tcon->num_smbs_sent, 0);
43259- atomic_set(&tcon->num_writes, 0);
43260- atomic_set(&tcon->num_reads, 0);
43261- atomic_set(&tcon->num_oplock_brks, 0);
43262- atomic_set(&tcon->num_opens, 0);
43263- atomic_set(&tcon->num_posixopens, 0);
43264- atomic_set(&tcon->num_posixmkdirs, 0);
43265- atomic_set(&tcon->num_closes, 0);
43266- atomic_set(&tcon->num_deletes, 0);
43267- atomic_set(&tcon->num_mkdirs, 0);
43268- atomic_set(&tcon->num_rmdirs, 0);
43269- atomic_set(&tcon->num_renames, 0);
43270- atomic_set(&tcon->num_t2renames, 0);
43271- atomic_set(&tcon->num_ffirst, 0);
43272- atomic_set(&tcon->num_fnext, 0);
43273- atomic_set(&tcon->num_fclose, 0);
43274- atomic_set(&tcon->num_hardlinks, 0);
43275- atomic_set(&tcon->num_symlinks, 0);
43276- atomic_set(&tcon->num_locks, 0);
43277+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
43278+ atomic_set_unchecked(&tcon->num_writes, 0);
43279+ atomic_set_unchecked(&tcon->num_reads, 0);
43280+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
43281+ atomic_set_unchecked(&tcon->num_opens, 0);
43282+ atomic_set_unchecked(&tcon->num_posixopens, 0);
43283+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
43284+ atomic_set_unchecked(&tcon->num_closes, 0);
43285+ atomic_set_unchecked(&tcon->num_deletes, 0);
43286+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
43287+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
43288+ atomic_set_unchecked(&tcon->num_renames, 0);
43289+ atomic_set_unchecked(&tcon->num_t2renames, 0);
43290+ atomic_set_unchecked(&tcon->num_ffirst, 0);
43291+ atomic_set_unchecked(&tcon->num_fnext, 0);
43292+ atomic_set_unchecked(&tcon->num_fclose, 0);
43293+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
43294+ atomic_set_unchecked(&tcon->num_symlinks, 0);
43295+ atomic_set_unchecked(&tcon->num_locks, 0);
43296 }
43297 }
43298 }
43299@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43300 smBufAllocCount.counter, cifs_min_small);
43301 #ifdef CONFIG_CIFS_STATS2
43302 seq_printf(m, "Total Large %d Small %d Allocations\n",
43303- atomic_read(&totBufAllocCount),
43304- atomic_read(&totSmBufAllocCount));
43305+ atomic_read_unchecked(&totBufAllocCount),
43306+ atomic_read_unchecked(&totSmBufAllocCount));
43307 #endif /* CONFIG_CIFS_STATS2 */
43308
43309 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
43310@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43311 if (tcon->need_reconnect)
43312 seq_puts(m, "\tDISCONNECTED ");
43313 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
43314- atomic_read(&tcon->num_smbs_sent),
43315- atomic_read(&tcon->num_oplock_brks));
43316+ atomic_read_unchecked(&tcon->num_smbs_sent),
43317+ atomic_read_unchecked(&tcon->num_oplock_brks));
43318 seq_printf(m, "\nReads: %d Bytes: %lld",
43319- atomic_read(&tcon->num_reads),
43320+ atomic_read_unchecked(&tcon->num_reads),
43321 (long long)(tcon->bytes_read));
43322 seq_printf(m, "\nWrites: %d Bytes: %lld",
43323- atomic_read(&tcon->num_writes),
43324+ atomic_read_unchecked(&tcon->num_writes),
43325 (long long)(tcon->bytes_written));
43326 seq_printf(m, "\nFlushes: %d",
43327- atomic_read(&tcon->num_flushes));
43328+ atomic_read_unchecked(&tcon->num_flushes));
43329 seq_printf(m, "\nLocks: %d HardLinks: %d "
43330 "Symlinks: %d",
43331- atomic_read(&tcon->num_locks),
43332- atomic_read(&tcon->num_hardlinks),
43333- atomic_read(&tcon->num_symlinks));
43334+ atomic_read_unchecked(&tcon->num_locks),
43335+ atomic_read_unchecked(&tcon->num_hardlinks),
43336+ atomic_read_unchecked(&tcon->num_symlinks));
43337 seq_printf(m, "\nOpens: %d Closes: %d "
43338 "Deletes: %d",
43339- atomic_read(&tcon->num_opens),
43340- atomic_read(&tcon->num_closes),
43341- atomic_read(&tcon->num_deletes));
43342+ atomic_read_unchecked(&tcon->num_opens),
43343+ atomic_read_unchecked(&tcon->num_closes),
43344+ atomic_read_unchecked(&tcon->num_deletes));
43345 seq_printf(m, "\nPosix Opens: %d "
43346 "Posix Mkdirs: %d",
43347- atomic_read(&tcon->num_posixopens),
43348- atomic_read(&tcon->num_posixmkdirs));
43349+ atomic_read_unchecked(&tcon->num_posixopens),
43350+ atomic_read_unchecked(&tcon->num_posixmkdirs));
43351 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
43352- atomic_read(&tcon->num_mkdirs),
43353- atomic_read(&tcon->num_rmdirs));
43354+ atomic_read_unchecked(&tcon->num_mkdirs),
43355+ atomic_read_unchecked(&tcon->num_rmdirs));
43356 seq_printf(m, "\nRenames: %d T2 Renames %d",
43357- atomic_read(&tcon->num_renames),
43358- atomic_read(&tcon->num_t2renames));
43359+ atomic_read_unchecked(&tcon->num_renames),
43360+ atomic_read_unchecked(&tcon->num_t2renames));
43361 seq_printf(m, "\nFindFirst: %d FNext %d "
43362 "FClose %d",
43363- atomic_read(&tcon->num_ffirst),
43364- atomic_read(&tcon->num_fnext),
43365- atomic_read(&tcon->num_fclose));
43366+ atomic_read_unchecked(&tcon->num_ffirst),
43367+ atomic_read_unchecked(&tcon->num_fnext),
43368+ atomic_read_unchecked(&tcon->num_fclose));
43369 }
43370 }
43371 }
43372diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
43373index 6ee1cb4..8443157 100644
43374--- a/fs/cifs/cifsfs.c
43375+++ b/fs/cifs/cifsfs.c
43376@@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
43377 cifs_req_cachep = kmem_cache_create("cifs_request",
43378 CIFSMaxBufSize +
43379 MAX_CIFS_HDR_SIZE, 0,
43380- SLAB_HWCACHE_ALIGN, NULL);
43381+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
43382 if (cifs_req_cachep == NULL)
43383 return -ENOMEM;
43384
43385@@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
43386 efficient to alloc 1 per page off the slab compared to 17K (5page)
43387 alloc of large cifs buffers even when page debugging is on */
43388 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
43389- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
43390+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
43391 NULL);
43392 if (cifs_sm_req_cachep == NULL) {
43393 mempool_destroy(cifs_req_poolp);
43394@@ -1101,8 +1101,8 @@ init_cifs(void)
43395 atomic_set(&bufAllocCount, 0);
43396 atomic_set(&smBufAllocCount, 0);
43397 #ifdef CONFIG_CIFS_STATS2
43398- atomic_set(&totBufAllocCount, 0);
43399- atomic_set(&totSmBufAllocCount, 0);
43400+ atomic_set_unchecked(&totBufAllocCount, 0);
43401+ atomic_set_unchecked(&totSmBufAllocCount, 0);
43402 #endif /* CONFIG_CIFS_STATS2 */
43403
43404 atomic_set(&midCount, 0);
43405diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
43406index d47d20a..77e8b33 100644
43407--- a/fs/cifs/cifsglob.h
43408+++ b/fs/cifs/cifsglob.h
43409@@ -388,28 +388,28 @@ struct cifs_tcon {
43410 __u16 Flags; /* optional support bits */
43411 enum statusEnum tidStatus;
43412 #ifdef CONFIG_CIFS_STATS
43413- atomic_t num_smbs_sent;
43414- atomic_t num_writes;
43415- atomic_t num_reads;
43416- atomic_t num_flushes;
43417- atomic_t num_oplock_brks;
43418- atomic_t num_opens;
43419- atomic_t num_closes;
43420- atomic_t num_deletes;
43421- atomic_t num_mkdirs;
43422- atomic_t num_posixopens;
43423- atomic_t num_posixmkdirs;
43424- atomic_t num_rmdirs;
43425- atomic_t num_renames;
43426- atomic_t num_t2renames;
43427- atomic_t num_ffirst;
43428- atomic_t num_fnext;
43429- atomic_t num_fclose;
43430- atomic_t num_hardlinks;
43431- atomic_t num_symlinks;
43432- atomic_t num_locks;
43433- atomic_t num_acl_get;
43434- atomic_t num_acl_set;
43435+ atomic_unchecked_t num_smbs_sent;
43436+ atomic_unchecked_t num_writes;
43437+ atomic_unchecked_t num_reads;
43438+ atomic_unchecked_t num_flushes;
43439+ atomic_unchecked_t num_oplock_brks;
43440+ atomic_unchecked_t num_opens;
43441+ atomic_unchecked_t num_closes;
43442+ atomic_unchecked_t num_deletes;
43443+ atomic_unchecked_t num_mkdirs;
43444+ atomic_unchecked_t num_posixopens;
43445+ atomic_unchecked_t num_posixmkdirs;
43446+ atomic_unchecked_t num_rmdirs;
43447+ atomic_unchecked_t num_renames;
43448+ atomic_unchecked_t num_t2renames;
43449+ atomic_unchecked_t num_ffirst;
43450+ atomic_unchecked_t num_fnext;
43451+ atomic_unchecked_t num_fclose;
43452+ atomic_unchecked_t num_hardlinks;
43453+ atomic_unchecked_t num_symlinks;
43454+ atomic_unchecked_t num_locks;
43455+ atomic_unchecked_t num_acl_get;
43456+ atomic_unchecked_t num_acl_set;
43457 #ifdef CONFIG_CIFS_STATS2
43458 unsigned long long time_writes;
43459 unsigned long long time_reads;
43460@@ -624,7 +624,7 @@ convert_delimiter(char *path, char delim)
43461 }
43462
43463 #ifdef CONFIG_CIFS_STATS
43464-#define cifs_stats_inc atomic_inc
43465+#define cifs_stats_inc atomic_inc_unchecked
43466
43467 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
43468 unsigned int bytes)
43469@@ -983,8 +983,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43470 /* Various Debug counters */
43471 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43472 #ifdef CONFIG_CIFS_STATS2
43473-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43474-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43475+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43476+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43477 #endif
43478 GLOBAL_EXTERN atomic_t smBufAllocCount;
43479 GLOBAL_EXTERN atomic_t midCount;
43480diff --git a/fs/cifs/link.c b/fs/cifs/link.c
43481index 6b0e064..94e6c3c 100644
43482--- a/fs/cifs/link.c
43483+++ b/fs/cifs/link.c
43484@@ -600,7 +600,7 @@ symlink_exit:
43485
43486 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43487 {
43488- char *p = nd_get_link(nd);
43489+ const char *p = nd_get_link(nd);
43490 if (!IS_ERR(p))
43491 kfree(p);
43492 }
43493diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
43494index 703ef5c..2a44ed5 100644
43495--- a/fs/cifs/misc.c
43496+++ b/fs/cifs/misc.c
43497@@ -156,7 +156,7 @@ cifs_buf_get(void)
43498 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43499 atomic_inc(&bufAllocCount);
43500 #ifdef CONFIG_CIFS_STATS2
43501- atomic_inc(&totBufAllocCount);
43502+ atomic_inc_unchecked(&totBufAllocCount);
43503 #endif /* CONFIG_CIFS_STATS2 */
43504 }
43505
43506@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43507 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43508 atomic_inc(&smBufAllocCount);
43509 #ifdef CONFIG_CIFS_STATS2
43510- atomic_inc(&totSmBufAllocCount);
43511+ atomic_inc_unchecked(&totSmBufAllocCount);
43512 #endif /* CONFIG_CIFS_STATS2 */
43513
43514 }
43515diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43516index 6901578..d402eb5 100644
43517--- a/fs/coda/cache.c
43518+++ b/fs/coda/cache.c
43519@@ -24,7 +24,7 @@
43520 #include "coda_linux.h"
43521 #include "coda_cache.h"
43522
43523-static atomic_t permission_epoch = ATOMIC_INIT(0);
43524+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43525
43526 /* replace or extend an acl cache hit */
43527 void coda_cache_enter(struct inode *inode, int mask)
43528@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43529 struct coda_inode_info *cii = ITOC(inode);
43530
43531 spin_lock(&cii->c_lock);
43532- cii->c_cached_epoch = atomic_read(&permission_epoch);
43533+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43534 if (cii->c_uid != current_fsuid()) {
43535 cii->c_uid = current_fsuid();
43536 cii->c_cached_perm = mask;
43537@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43538 {
43539 struct coda_inode_info *cii = ITOC(inode);
43540 spin_lock(&cii->c_lock);
43541- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43542+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43543 spin_unlock(&cii->c_lock);
43544 }
43545
43546 /* remove all acl caches */
43547 void coda_cache_clear_all(struct super_block *sb)
43548 {
43549- atomic_inc(&permission_epoch);
43550+ atomic_inc_unchecked(&permission_epoch);
43551 }
43552
43553
43554@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43555 spin_lock(&cii->c_lock);
43556 hit = (mask & cii->c_cached_perm) == mask &&
43557 cii->c_uid == current_fsuid() &&
43558- cii->c_cached_epoch == atomic_read(&permission_epoch);
43559+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43560 spin_unlock(&cii->c_lock);
43561
43562 return hit;
43563diff --git a/fs/compat.c b/fs/compat.c
43564index 07880ba..3fb2862 100644
43565--- a/fs/compat.c
43566+++ b/fs/compat.c
43567@@ -491,7 +491,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43568
43569 set_fs(KERNEL_DS);
43570 /* The __user pointer cast is valid because of the set_fs() */
43571- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43572+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43573 set_fs(oldfs);
43574 /* truncating is ok because it's a user address */
43575 if (!ret)
43576@@ -549,7 +549,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43577 goto out;
43578
43579 ret = -EINVAL;
43580- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43581+ if (nr_segs > UIO_MAXIOV)
43582 goto out;
43583 if (nr_segs > fast_segs) {
43584 ret = -ENOMEM;
43585@@ -832,6 +832,7 @@ struct compat_old_linux_dirent {
43586
43587 struct compat_readdir_callback {
43588 struct compat_old_linux_dirent __user *dirent;
43589+ struct file * file;
43590 int result;
43591 };
43592
43593@@ -849,6 +850,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43594 buf->result = -EOVERFLOW;
43595 return -EOVERFLOW;
43596 }
43597+
43598+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43599+ return 0;
43600+
43601 buf->result++;
43602 dirent = buf->dirent;
43603 if (!access_ok(VERIFY_WRITE, dirent,
43604@@ -881,6 +886,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43605
43606 buf.result = 0;
43607 buf.dirent = dirent;
43608+ buf.file = file;
43609
43610 error = vfs_readdir(file, compat_fillonedir, &buf);
43611 if (buf.result)
43612@@ -901,6 +907,7 @@ struct compat_linux_dirent {
43613 struct compat_getdents_callback {
43614 struct compat_linux_dirent __user *current_dir;
43615 struct compat_linux_dirent __user *previous;
43616+ struct file * file;
43617 int count;
43618 int error;
43619 };
43620@@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43621 buf->error = -EOVERFLOW;
43622 return -EOVERFLOW;
43623 }
43624+
43625+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43626+ return 0;
43627+
43628 dirent = buf->previous;
43629 if (dirent) {
43630 if (__put_user(offset, &dirent->d_off))
43631@@ -969,6 +980,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43632 buf.previous = NULL;
43633 buf.count = count;
43634 buf.error = 0;
43635+ buf.file = file;
43636
43637 error = vfs_readdir(file, compat_filldir, &buf);
43638 if (error >= 0)
43639@@ -990,6 +1002,7 @@ out:
43640 struct compat_getdents_callback64 {
43641 struct linux_dirent64 __user *current_dir;
43642 struct linux_dirent64 __user *previous;
43643+ struct file * file;
43644 int count;
43645 int error;
43646 };
43647@@ -1006,6 +1019,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43648 buf->error = -EINVAL; /* only used if we fail.. */
43649 if (reclen > buf->count)
43650 return -EINVAL;
43651+
43652+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43653+ return 0;
43654+
43655 dirent = buf->previous;
43656
43657 if (dirent) {
43658@@ -1057,13 +1074,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43659 buf.previous = NULL;
43660 buf.count = count;
43661 buf.error = 0;
43662+ buf.file = file;
43663
43664 error = vfs_readdir(file, compat_filldir64, &buf);
43665 if (error >= 0)
43666 error = buf.error;
43667 lastdirent = buf.previous;
43668 if (lastdirent) {
43669- typeof(lastdirent->d_off) d_off = file->f_pos;
43670+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43671 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43672 error = -EFAULT;
43673 else
43674diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43675index 112e45a..b59845b 100644
43676--- a/fs/compat_binfmt_elf.c
43677+++ b/fs/compat_binfmt_elf.c
43678@@ -30,11 +30,13 @@
43679 #undef elf_phdr
43680 #undef elf_shdr
43681 #undef elf_note
43682+#undef elf_dyn
43683 #undef elf_addr_t
43684 #define elfhdr elf32_hdr
43685 #define elf_phdr elf32_phdr
43686 #define elf_shdr elf32_shdr
43687 #define elf_note elf32_note
43688+#define elf_dyn Elf32_Dyn
43689 #define elf_addr_t Elf32_Addr
43690
43691 /*
43692diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43693index a26bea1..ae23e72 100644
43694--- a/fs/compat_ioctl.c
43695+++ b/fs/compat_ioctl.c
43696@@ -211,6 +211,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43697
43698 err = get_user(palp, &up->palette);
43699 err |= get_user(length, &up->length);
43700+ if (err)
43701+ return -EFAULT;
43702
43703 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43704 err = put_user(compat_ptr(palp), &up_native->palette);
43705@@ -622,7 +624,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43706 return -EFAULT;
43707 if (__get_user(udata, &ss32->iomem_base))
43708 return -EFAULT;
43709- ss.iomem_base = compat_ptr(udata);
43710+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43711 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43712 __get_user(ss.port_high, &ss32->port_high))
43713 return -EFAULT;
43714@@ -797,7 +799,7 @@ static int compat_ioctl_preallocate(struct file *file,
43715 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43716 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43717 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43718- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43719+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43720 return -EFAULT;
43721
43722 return ioctl_preallocate(file, p);
43723@@ -1611,8 +1613,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43724 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43725 {
43726 unsigned int a, b;
43727- a = *(unsigned int *)p;
43728- b = *(unsigned int *)q;
43729+ a = *(const unsigned int *)p;
43730+ b = *(const unsigned int *)q;
43731 if (a > b)
43732 return 1;
43733 if (a < b)
43734diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43735index 5ddd7eb..c18bf04 100644
43736--- a/fs/configfs/dir.c
43737+++ b/fs/configfs/dir.c
43738@@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43739 }
43740 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43741 struct configfs_dirent *next;
43742- const char * name;
43743+ const unsigned char * name;
43744+ char d_name[sizeof(next->s_dentry->d_iname)];
43745 int len;
43746 struct inode *inode = NULL;
43747
43748@@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43749 continue;
43750
43751 name = configfs_get_name(next);
43752- len = strlen(name);
43753+ if (next->s_dentry && name == next->s_dentry->d_iname) {
43754+ len = next->s_dentry->d_name.len;
43755+ memcpy(d_name, name, len);
43756+ name = d_name;
43757+ } else
43758+ len = strlen(name);
43759
43760 /*
43761 * We'll have a dentry and an inode for
43762diff --git a/fs/configfs/file.c b/fs/configfs/file.c
43763index 2b6cb23..d76e879 100644
43764--- a/fs/configfs/file.c
43765+++ b/fs/configfs/file.c
43766@@ -135,6 +135,8 @@ out:
43767 */
43768
43769 static int
43770+fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size_t count) __size_overflow(3);
43771+static int
43772 fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size_t count)
43773 {
43774 int error;
43775diff --git a/fs/dcache.c b/fs/dcache.c
43776index 2576d14..0cec38d 100644
43777--- a/fs/dcache.c
43778+++ b/fs/dcache.c
43779@@ -105,10 +105,10 @@ static unsigned int d_hash_shift __read_mostly;
43780 static struct hlist_bl_head *dentry_hashtable __read_mostly;
43781
43782 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
43783- unsigned long hash)
43784+ unsigned int hash)
43785 {
43786- hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
43787- hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
43788+ hash += (unsigned long) parent / L1_CACHE_BYTES;
43789+ hash = hash + (hash >> D_HASHBITS);
43790 return dentry_hashtable + (hash & D_HASHMASK);
43791 }
43792
43793@@ -3067,7 +3067,7 @@ void __init vfs_caches_init(unsigned long mempages)
43794 mempages -= reserve;
43795
43796 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43797- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43798+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43799
43800 dcache_init();
43801 inode_init();
43802diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
43803index 956d5dd..e755e04 100644
43804--- a/fs/debugfs/inode.c
43805+++ b/fs/debugfs/inode.c
43806@@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
43807 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
43808 {
43809 return debugfs_create_file(name,
43810+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43811+ S_IFDIR | S_IRWXU,
43812+#else
43813 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43814+#endif
43815 parent, NULL, NULL);
43816 }
43817 EXPORT_SYMBOL_GPL(debugfs_create_dir);
43818diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43819index ab35b11..b30af66 100644
43820--- a/fs/ecryptfs/inode.c
43821+++ b/fs/ecryptfs/inode.c
43822@@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43823 old_fs = get_fs();
43824 set_fs(get_ds());
43825 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43826- (char __user *)lower_buf,
43827+ (char __force_user *)lower_buf,
43828 lower_bufsiz);
43829 set_fs(old_fs);
43830 if (rc < 0)
43831@@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43832 }
43833 old_fs = get_fs();
43834 set_fs(get_ds());
43835- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43836+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43837 set_fs(old_fs);
43838 if (rc < 0) {
43839 kfree(buf);
43840@@ -733,7 +733,7 @@ out:
43841 static void
43842 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43843 {
43844- char *buf = nd_get_link(nd);
43845+ const char *buf = nd_get_link(nd);
43846 if (!IS_ERR(buf)) {
43847 /* Free the char* */
43848 kfree(buf);
43849diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43850index 3a06f40..f7af544 100644
43851--- a/fs/ecryptfs/miscdev.c
43852+++ b/fs/ecryptfs/miscdev.c
43853@@ -345,7 +345,7 @@ check_list:
43854 goto out_unlock_msg_ctx;
43855 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
43856 if (msg_ctx->msg) {
43857- if (copy_to_user(&buf[i], packet_length, packet_length_size))
43858+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43859 goto out_unlock_msg_ctx;
43860 i += packet_length_size;
43861 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43862diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43863index b2a34a1..162fa69 100644
43864--- a/fs/ecryptfs/read_write.c
43865+++ b/fs/ecryptfs/read_write.c
43866@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43867 return -EIO;
43868 fs_save = get_fs();
43869 set_fs(get_ds());
43870- rc = vfs_write(lower_file, data, size, &offset);
43871+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43872 set_fs(fs_save);
43873 mark_inode_dirty_sync(ecryptfs_inode);
43874 return rc;
43875@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43876 return -EIO;
43877 fs_save = get_fs();
43878 set_fs(get_ds());
43879- rc = vfs_read(lower_file, data, size, &offset);
43880+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43881 set_fs(fs_save);
43882 return rc;
43883 }
43884diff --git a/fs/exec.c b/fs/exec.c
43885index 153dee1..ab4ebe9 100644
43886--- a/fs/exec.c
43887+++ b/fs/exec.c
43888@@ -55,6 +55,13 @@
43889 #include <linux/pipe_fs_i.h>
43890 #include <linux/oom.h>
43891 #include <linux/compat.h>
43892+#include <linux/random.h>
43893+#include <linux/seq_file.h>
43894+
43895+#ifdef CONFIG_PAX_REFCOUNT
43896+#include <linux/kallsyms.h>
43897+#include <linux/kdebug.h>
43898+#endif
43899
43900 #include <asm/uaccess.h>
43901 #include <asm/mmu_context.h>
43902@@ -63,6 +70,15 @@
43903 #include <trace/events/task.h>
43904 #include "internal.h"
43905
43906+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
43907+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
43908+#endif
43909+
43910+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43911+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43912+EXPORT_SYMBOL(pax_set_initial_flags_func);
43913+#endif
43914+
43915 int core_uses_pid;
43916 char core_pattern[CORENAME_MAX_SIZE] = "core";
43917 unsigned int core_pipe_limit;
43918@@ -72,7 +88,7 @@ struct core_name {
43919 char *corename;
43920 int used, size;
43921 };
43922-static atomic_t call_count = ATOMIC_INIT(1);
43923+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43924
43925 /* The maximal length of core_pattern is also specified in sysctl.c */
43926
43927@@ -190,18 +206,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43928 int write)
43929 {
43930 struct page *page;
43931- int ret;
43932
43933-#ifdef CONFIG_STACK_GROWSUP
43934- if (write) {
43935- ret = expand_downwards(bprm->vma, pos);
43936- if (ret < 0)
43937- return NULL;
43938- }
43939-#endif
43940- ret = get_user_pages(current, bprm->mm, pos,
43941- 1, write, 1, &page, NULL);
43942- if (ret <= 0)
43943+ if (0 > expand_downwards(bprm->vma, pos))
43944+ return NULL;
43945+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43946 return NULL;
43947
43948 if (write) {
43949@@ -217,6 +225,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43950 if (size <= ARG_MAX)
43951 return page;
43952
43953+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43954+ // only allow 512KB for argv+env on suid/sgid binaries
43955+ // to prevent easy ASLR exhaustion
43956+ if (((bprm->cred->euid != current_euid()) ||
43957+ (bprm->cred->egid != current_egid())) &&
43958+ (size > (512 * 1024))) {
43959+ put_page(page);
43960+ return NULL;
43961+ }
43962+#endif
43963+
43964 /*
43965 * Limit to 1/4-th the stack size for the argv+env strings.
43966 * This ensures that:
43967@@ -276,6 +295,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43968 vma->vm_end = STACK_TOP_MAX;
43969 vma->vm_start = vma->vm_end - PAGE_SIZE;
43970 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43971+
43972+#ifdef CONFIG_PAX_SEGMEXEC
43973+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43974+#endif
43975+
43976 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43977 INIT_LIST_HEAD(&vma->anon_vma_chain);
43978
43979@@ -290,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43980 mm->stack_vm = mm->total_vm = 1;
43981 up_write(&mm->mmap_sem);
43982 bprm->p = vma->vm_end - sizeof(void *);
43983+
43984+#ifdef CONFIG_PAX_RANDUSTACK
43985+ if (randomize_va_space)
43986+ bprm->p ^= random32() & ~PAGE_MASK;
43987+#endif
43988+
43989 return 0;
43990 err:
43991 up_write(&mm->mmap_sem);
43992@@ -398,19 +428,7 @@ err:
43993 return err;
43994 }
43995
43996-struct user_arg_ptr {
43997-#ifdef CONFIG_COMPAT
43998- bool is_compat;
43999-#endif
44000- union {
44001- const char __user *const __user *native;
44002-#ifdef CONFIG_COMPAT
44003- compat_uptr_t __user *compat;
44004-#endif
44005- } ptr;
44006-};
44007-
44008-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44009+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44010 {
44011 const char __user *native;
44012
44013@@ -419,14 +437,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44014 compat_uptr_t compat;
44015
44016 if (get_user(compat, argv.ptr.compat + nr))
44017- return ERR_PTR(-EFAULT);
44018+ return (const char __force_user *)ERR_PTR(-EFAULT);
44019
44020 return compat_ptr(compat);
44021 }
44022 #endif
44023
44024 if (get_user(native, argv.ptr.native + nr))
44025- return ERR_PTR(-EFAULT);
44026+ return (const char __force_user *)ERR_PTR(-EFAULT);
44027
44028 return native;
44029 }
44030@@ -445,7 +463,7 @@ static int count(struct user_arg_ptr argv, int max)
44031 if (!p)
44032 break;
44033
44034- if (IS_ERR(p))
44035+ if (IS_ERR((const char __force_kernel *)p))
44036 return -EFAULT;
44037
44038 if (i++ >= max)
44039@@ -479,7 +497,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
44040
44041 ret = -EFAULT;
44042 str = get_user_arg_ptr(argv, argc);
44043- if (IS_ERR(str))
44044+ if (IS_ERR((const char __force_kernel *)str))
44045 goto out;
44046
44047 len = strnlen_user(str, MAX_ARG_STRLEN);
44048@@ -561,7 +579,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
44049 int r;
44050 mm_segment_t oldfs = get_fs();
44051 struct user_arg_ptr argv = {
44052- .ptr.native = (const char __user *const __user *)__argv,
44053+ .ptr.native = (const char __force_user *const __force_user *)__argv,
44054 };
44055
44056 set_fs(KERNEL_DS);
44057@@ -596,7 +614,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44058 unsigned long new_end = old_end - shift;
44059 struct mmu_gather tlb;
44060
44061- BUG_ON(new_start > new_end);
44062+ if (new_start >= new_end || new_start < mmap_min_addr)
44063+ return -ENOMEM;
44064
44065 /*
44066 * ensure there are no vmas between where we want to go
44067@@ -605,6 +624,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44068 if (vma != find_vma(mm, new_start))
44069 return -EFAULT;
44070
44071+#ifdef CONFIG_PAX_SEGMEXEC
44072+ BUG_ON(pax_find_mirror_vma(vma));
44073+#endif
44074+
44075 /*
44076 * cover the whole range: [new_start, old_end)
44077 */
44078@@ -685,10 +708,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44079 stack_top = arch_align_stack(stack_top);
44080 stack_top = PAGE_ALIGN(stack_top);
44081
44082- if (unlikely(stack_top < mmap_min_addr) ||
44083- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
44084- return -ENOMEM;
44085-
44086 stack_shift = vma->vm_end - stack_top;
44087
44088 bprm->p -= stack_shift;
44089@@ -700,8 +719,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
44090 bprm->exec -= stack_shift;
44091
44092 down_write(&mm->mmap_sem);
44093+
44094+ /* Move stack pages down in memory. */
44095+ if (stack_shift) {
44096+ ret = shift_arg_pages(vma, stack_shift);
44097+ if (ret)
44098+ goto out_unlock;
44099+ }
44100+
44101 vm_flags = VM_STACK_FLAGS;
44102
44103+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44104+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44105+ vm_flags &= ~VM_EXEC;
44106+
44107+#ifdef CONFIG_PAX_MPROTECT
44108+ if (mm->pax_flags & MF_PAX_MPROTECT)
44109+ vm_flags &= ~VM_MAYEXEC;
44110+#endif
44111+
44112+ }
44113+#endif
44114+
44115 /*
44116 * Adjust stack execute permissions; explicitly enable for
44117 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
44118@@ -720,13 +759,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44119 goto out_unlock;
44120 BUG_ON(prev != vma);
44121
44122- /* Move stack pages down in memory. */
44123- if (stack_shift) {
44124- ret = shift_arg_pages(vma, stack_shift);
44125- if (ret)
44126- goto out_unlock;
44127- }
44128-
44129 /* mprotect_fixup is overkill to remove the temporary stack flags */
44130 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
44131
44132@@ -807,7 +839,7 @@ int kernel_read(struct file *file, loff_t offset,
44133 old_fs = get_fs();
44134 set_fs(get_ds());
44135 /* The cast to a user pointer is valid due to the set_fs() */
44136- result = vfs_read(file, (void __user *)addr, count, &pos);
44137+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
44138 set_fs(old_fs);
44139 return result;
44140 }
44141@@ -1252,7 +1284,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
44142 }
44143 rcu_read_unlock();
44144
44145- if (p->fs->users > n_fs) {
44146+ if (atomic_read(&p->fs->users) > n_fs) {
44147 bprm->unsafe |= LSM_UNSAFE_SHARE;
44148 } else {
44149 res = -EAGAIN;
44150@@ -1447,6 +1479,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
44151
44152 EXPORT_SYMBOL(search_binary_handler);
44153
44154+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44155+static DEFINE_PER_CPU(u64, exec_counter);
44156+static int __init init_exec_counters(void)
44157+{
44158+ unsigned int cpu;
44159+
44160+ for_each_possible_cpu(cpu) {
44161+ per_cpu(exec_counter, cpu) = (u64)cpu;
44162+ }
44163+
44164+ return 0;
44165+}
44166+early_initcall(init_exec_counters);
44167+static inline void increment_exec_counter(void)
44168+{
44169+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
44170+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
44171+}
44172+#else
44173+static inline void increment_exec_counter(void) {}
44174+#endif
44175+
44176 /*
44177 * sys_execve() executes a new program.
44178 */
44179@@ -1455,6 +1509,11 @@ static int do_execve_common(const char *filename,
44180 struct user_arg_ptr envp,
44181 struct pt_regs *regs)
44182 {
44183+#ifdef CONFIG_GRKERNSEC
44184+ struct file *old_exec_file;
44185+ struct acl_subject_label *old_acl;
44186+ struct rlimit old_rlim[RLIM_NLIMITS];
44187+#endif
44188 struct linux_binprm *bprm;
44189 struct file *file;
44190 struct files_struct *displaced;
44191@@ -1462,6 +1521,8 @@ static int do_execve_common(const char *filename,
44192 int retval;
44193 const struct cred *cred = current_cred();
44194
44195+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
44196+
44197 /*
44198 * We move the actual failure in case of RLIMIT_NPROC excess from
44199 * set*uid() to execve() because too many poorly written programs
44200@@ -1502,12 +1563,27 @@ static int do_execve_common(const char *filename,
44201 if (IS_ERR(file))
44202 goto out_unmark;
44203
44204+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
44205+ retval = -EPERM;
44206+ goto out_file;
44207+ }
44208+
44209 sched_exec();
44210
44211 bprm->file = file;
44212 bprm->filename = filename;
44213 bprm->interp = filename;
44214
44215+ if (gr_process_user_ban()) {
44216+ retval = -EPERM;
44217+ goto out_file;
44218+ }
44219+
44220+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
44221+ retval = -EACCES;
44222+ goto out_file;
44223+ }
44224+
44225 retval = bprm_mm_init(bprm);
44226 if (retval)
44227 goto out_file;
44228@@ -1524,24 +1600,65 @@ static int do_execve_common(const char *filename,
44229 if (retval < 0)
44230 goto out;
44231
44232+#ifdef CONFIG_GRKERNSEC
44233+ old_acl = current->acl;
44234+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
44235+ old_exec_file = current->exec_file;
44236+ get_file(file);
44237+ current->exec_file = file;
44238+#endif
44239+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44240+ /* limit suid stack to 8MB
44241+ we saved the old limits above and will restore them if this exec fails
44242+ */
44243+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
44244+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
44245+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
44246+#endif
44247+
44248+ if (!gr_tpe_allow(file)) {
44249+ retval = -EACCES;
44250+ goto out_fail;
44251+ }
44252+
44253+ if (gr_check_crash_exec(file)) {
44254+ retval = -EACCES;
44255+ goto out_fail;
44256+ }
44257+
44258+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
44259+ bprm->unsafe);
44260+ if (retval < 0)
44261+ goto out_fail;
44262+
44263 retval = copy_strings_kernel(1, &bprm->filename, bprm);
44264 if (retval < 0)
44265- goto out;
44266+ goto out_fail;
44267
44268 bprm->exec = bprm->p;
44269 retval = copy_strings(bprm->envc, envp, bprm);
44270 if (retval < 0)
44271- goto out;
44272+ goto out_fail;
44273
44274 retval = copy_strings(bprm->argc, argv, bprm);
44275 if (retval < 0)
44276- goto out;
44277+ goto out_fail;
44278+
44279+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
44280+
44281+ gr_handle_exec_args(bprm, argv);
44282
44283 retval = search_binary_handler(bprm,regs);
44284 if (retval < 0)
44285- goto out;
44286+ goto out_fail;
44287+#ifdef CONFIG_GRKERNSEC
44288+ if (old_exec_file)
44289+ fput(old_exec_file);
44290+#endif
44291
44292 /* execve succeeded */
44293+
44294+ increment_exec_counter();
44295 current->fs->in_exec = 0;
44296 current->in_execve = 0;
44297 acct_update_integrals(current);
44298@@ -1550,6 +1667,14 @@ static int do_execve_common(const char *filename,
44299 put_files_struct(displaced);
44300 return retval;
44301
44302+out_fail:
44303+#ifdef CONFIG_GRKERNSEC
44304+ current->acl = old_acl;
44305+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
44306+ fput(current->exec_file);
44307+ current->exec_file = old_exec_file;
44308+#endif
44309+
44310 out:
44311 if (bprm->mm) {
44312 acct_arg_size(bprm, 0);
44313@@ -1623,7 +1748,7 @@ static int expand_corename(struct core_name *cn)
44314 {
44315 char *old_corename = cn->corename;
44316
44317- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
44318+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
44319 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
44320
44321 if (!cn->corename) {
44322@@ -1720,7 +1845,7 @@ static int format_corename(struct core_name *cn, long signr)
44323 int pid_in_pattern = 0;
44324 int err = 0;
44325
44326- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
44327+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
44328 cn->corename = kmalloc(cn->size, GFP_KERNEL);
44329 cn->used = 0;
44330
44331@@ -1817,6 +1942,228 @@ out:
44332 return ispipe;
44333 }
44334
44335+int pax_check_flags(unsigned long *flags)
44336+{
44337+ int retval = 0;
44338+
44339+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
44340+ if (*flags & MF_PAX_SEGMEXEC)
44341+ {
44342+ *flags &= ~MF_PAX_SEGMEXEC;
44343+ retval = -EINVAL;
44344+ }
44345+#endif
44346+
44347+ if ((*flags & MF_PAX_PAGEEXEC)
44348+
44349+#ifdef CONFIG_PAX_PAGEEXEC
44350+ && (*flags & MF_PAX_SEGMEXEC)
44351+#endif
44352+
44353+ )
44354+ {
44355+ *flags &= ~MF_PAX_PAGEEXEC;
44356+ retval = -EINVAL;
44357+ }
44358+
44359+ if ((*flags & MF_PAX_MPROTECT)
44360+
44361+#ifdef CONFIG_PAX_MPROTECT
44362+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44363+#endif
44364+
44365+ )
44366+ {
44367+ *flags &= ~MF_PAX_MPROTECT;
44368+ retval = -EINVAL;
44369+ }
44370+
44371+ if ((*flags & MF_PAX_EMUTRAMP)
44372+
44373+#ifdef CONFIG_PAX_EMUTRAMP
44374+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44375+#endif
44376+
44377+ )
44378+ {
44379+ *flags &= ~MF_PAX_EMUTRAMP;
44380+ retval = -EINVAL;
44381+ }
44382+
44383+ return retval;
44384+}
44385+
44386+EXPORT_SYMBOL(pax_check_flags);
44387+
44388+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44389+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
44390+{
44391+ struct task_struct *tsk = current;
44392+ struct mm_struct *mm = current->mm;
44393+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
44394+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
44395+ char *path_exec = NULL;
44396+ char *path_fault = NULL;
44397+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
44398+
44399+ if (buffer_exec && buffer_fault) {
44400+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
44401+
44402+ down_read(&mm->mmap_sem);
44403+ vma = mm->mmap;
44404+ while (vma && (!vma_exec || !vma_fault)) {
44405+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
44406+ vma_exec = vma;
44407+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
44408+ vma_fault = vma;
44409+ vma = vma->vm_next;
44410+ }
44411+ if (vma_exec) {
44412+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
44413+ if (IS_ERR(path_exec))
44414+ path_exec = "<path too long>";
44415+ else {
44416+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
44417+ if (path_exec) {
44418+ *path_exec = 0;
44419+ path_exec = buffer_exec;
44420+ } else
44421+ path_exec = "<path too long>";
44422+ }
44423+ }
44424+ if (vma_fault) {
44425+ start = vma_fault->vm_start;
44426+ end = vma_fault->vm_end;
44427+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
44428+ if (vma_fault->vm_file) {
44429+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
44430+ if (IS_ERR(path_fault))
44431+ path_fault = "<path too long>";
44432+ else {
44433+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
44434+ if (path_fault) {
44435+ *path_fault = 0;
44436+ path_fault = buffer_fault;
44437+ } else
44438+ path_fault = "<path too long>";
44439+ }
44440+ } else
44441+ path_fault = "<anonymous mapping>";
44442+ }
44443+ up_read(&mm->mmap_sem);
44444+ }
44445+ if (tsk->signal->curr_ip)
44446+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
44447+ else
44448+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
44449+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
44450+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
44451+ task_uid(tsk), task_euid(tsk), pc, sp);
44452+ free_page((unsigned long)buffer_exec);
44453+ free_page((unsigned long)buffer_fault);
44454+ pax_report_insns(regs, pc, sp);
44455+ do_coredump(SIGKILL, SIGKILL, regs);
44456+}
44457+#endif
44458+
44459+#ifdef CONFIG_PAX_REFCOUNT
44460+void pax_report_refcount_overflow(struct pt_regs *regs)
44461+{
44462+ if (current->signal->curr_ip)
44463+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44464+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
44465+ else
44466+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44467+ current->comm, task_pid_nr(current), current_uid(), current_euid());
44468+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
44469+ show_regs(regs);
44470+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
44471+}
44472+#endif
44473+
44474+#ifdef CONFIG_PAX_USERCOPY
44475+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
44476+int object_is_on_stack(const void *obj, unsigned long len)
44477+{
44478+ const void * const stack = task_stack_page(current);
44479+ const void * const stackend = stack + THREAD_SIZE;
44480+
44481+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44482+ const void *frame = NULL;
44483+ const void *oldframe;
44484+#endif
44485+
44486+ if (obj + len < obj)
44487+ return -1;
44488+
44489+ if (obj + len <= stack || stackend <= obj)
44490+ return 0;
44491+
44492+ if (obj < stack || stackend < obj + len)
44493+ return -1;
44494+
44495+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44496+ oldframe = __builtin_frame_address(1);
44497+ if (oldframe)
44498+ frame = __builtin_frame_address(2);
44499+ /*
44500+ low ----------------------------------------------> high
44501+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
44502+ ^----------------^
44503+ allow copies only within here
44504+ */
44505+ while (stack <= frame && frame < stackend) {
44506+ /* if obj + len extends past the last frame, this
44507+ check won't pass and the next frame will be 0,
44508+ causing us to bail out and correctly report
44509+ the copy as invalid
44510+ */
44511+ if (obj + len <= frame)
44512+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44513+ oldframe = frame;
44514+ frame = *(const void * const *)frame;
44515+ }
44516+ return -1;
44517+#else
44518+ return 1;
44519+#endif
44520+}
44521+
44522+__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
44523+{
44524+ if (current->signal->curr_ip)
44525+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44526+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44527+ else
44528+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44529+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44530+ dump_stack();
44531+ gr_handle_kernel_exploit();
44532+ do_group_exit(SIGKILL);
44533+}
44534+#endif
44535+
44536+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44537+void pax_track_stack(void)
44538+{
44539+ unsigned long sp = (unsigned long)&sp;
44540+ if (sp < current_thread_info()->lowest_stack &&
44541+ sp > (unsigned long)task_stack_page(current))
44542+ current_thread_info()->lowest_stack = sp;
44543+}
44544+EXPORT_SYMBOL(pax_track_stack);
44545+#endif
44546+
44547+#ifdef CONFIG_PAX_SIZE_OVERFLOW
44548+void report_size_overflow(const char *file, unsigned int line, const char *func)
44549+{
44550+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
44551+ dump_stack();
44552+ do_group_exit(SIGKILL);
44553+}
44554+EXPORT_SYMBOL(report_size_overflow);
44555+#endif
44556+
44557 static int zap_process(struct task_struct *start, int exit_code)
44558 {
44559 struct task_struct *t;
44560@@ -2014,17 +2361,17 @@ static void wait_for_dump_helpers(struct file *file)
44561 pipe = file->f_path.dentry->d_inode->i_pipe;
44562
44563 pipe_lock(pipe);
44564- pipe->readers++;
44565- pipe->writers--;
44566+ atomic_inc(&pipe->readers);
44567+ atomic_dec(&pipe->writers);
44568
44569- while ((pipe->readers > 1) && (!signal_pending(current))) {
44570+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44571 wake_up_interruptible_sync(&pipe->wait);
44572 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44573 pipe_wait(pipe);
44574 }
44575
44576- pipe->readers--;
44577- pipe->writers++;
44578+ atomic_dec(&pipe->readers);
44579+ atomic_inc(&pipe->writers);
44580 pipe_unlock(pipe);
44581
44582 }
44583@@ -2085,7 +2432,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44584 int retval = 0;
44585 int flag = 0;
44586 int ispipe;
44587- static atomic_t core_dump_count = ATOMIC_INIT(0);
44588+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44589 struct coredump_params cprm = {
44590 .signr = signr,
44591 .regs = regs,
44592@@ -2100,6 +2447,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44593
44594 audit_core_dumps(signr);
44595
44596+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44597+ gr_handle_brute_attach(current, cprm.mm_flags);
44598+
44599 binfmt = mm->binfmt;
44600 if (!binfmt || !binfmt->core_dump)
44601 goto fail;
44602@@ -2167,7 +2517,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44603 }
44604 cprm.limit = RLIM_INFINITY;
44605
44606- dump_count = atomic_inc_return(&core_dump_count);
44607+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
44608 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44609 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44610 task_tgid_vnr(current), current->comm);
44611@@ -2194,6 +2544,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44612 } else {
44613 struct inode *inode;
44614
44615+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44616+
44617 if (cprm.limit < binfmt->min_coredump)
44618 goto fail_unlock;
44619
44620@@ -2237,7 +2589,7 @@ close_fail:
44621 filp_close(cprm.file, NULL);
44622 fail_dropcount:
44623 if (ispipe)
44624- atomic_dec(&core_dump_count);
44625+ atomic_dec_unchecked(&core_dump_count);
44626 fail_unlock:
44627 kfree(cn.corename);
44628 fail_corename:
44629@@ -2256,7 +2608,7 @@ fail:
44630 */
44631 int dump_write(struct file *file, const void *addr, int nr)
44632 {
44633- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44634+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44635 }
44636 EXPORT_SYMBOL(dump_write);
44637
44638diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44639index a8cbe1b..fed04cb 100644
44640--- a/fs/ext2/balloc.c
44641+++ b/fs/ext2/balloc.c
44642@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44643
44644 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44645 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44646- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44647+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44648 sbi->s_resuid != current_fsuid() &&
44649 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44650 return 0;
44651diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44652index a203892..4e64db5 100644
44653--- a/fs/ext3/balloc.c
44654+++ b/fs/ext3/balloc.c
44655@@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
44656
44657 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44658 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44659- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44660+ if (free_blocks < root_blocks + 1 &&
44661 !use_reservation && sbi->s_resuid != current_fsuid() &&
44662- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44663+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
44664+ !capable_nolog(CAP_SYS_RESOURCE)) {
44665 return 0;
44666 }
44667 return 1;
44668diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44669index f9e2cd8..bfdc476 100644
44670--- a/fs/ext4/balloc.c
44671+++ b/fs/ext4/balloc.c
44672@@ -438,8 +438,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
44673 /* Hm, nope. Are (enough) root reserved clusters available? */
44674 if (sbi->s_resuid == current_fsuid() ||
44675 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44676- capable(CAP_SYS_RESOURCE) ||
44677- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44678+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44679+ capable_nolog(CAP_SYS_RESOURCE)) {
44680
44681 if (free_clusters >= (nclusters + dirty_clusters))
44682 return 1;
44683diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44684index 3ce6a0c..0311fe5 100644
44685--- a/fs/ext4/ext4.h
44686+++ b/fs/ext4/ext4.h
44687@@ -1220,19 +1220,19 @@ struct ext4_sb_info {
44688 unsigned long s_mb_last_start;
44689
44690 /* stats for buddy allocator */
44691- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44692- atomic_t s_bal_success; /* we found long enough chunks */
44693- atomic_t s_bal_allocated; /* in blocks */
44694- atomic_t s_bal_ex_scanned; /* total extents scanned */
44695- atomic_t s_bal_goals; /* goal hits */
44696- atomic_t s_bal_breaks; /* too long searches */
44697- atomic_t s_bal_2orders; /* 2^order hits */
44698+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44699+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44700+ atomic_unchecked_t s_bal_allocated; /* in blocks */
44701+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44702+ atomic_unchecked_t s_bal_goals; /* goal hits */
44703+ atomic_unchecked_t s_bal_breaks; /* too long searches */
44704+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44705 spinlock_t s_bal_lock;
44706 unsigned long s_mb_buddies_generated;
44707 unsigned long long s_mb_generation_time;
44708- atomic_t s_mb_lost_chunks;
44709- atomic_t s_mb_preallocated;
44710- atomic_t s_mb_discarded;
44711+ atomic_unchecked_t s_mb_lost_chunks;
44712+ atomic_unchecked_t s_mb_preallocated;
44713+ atomic_unchecked_t s_mb_discarded;
44714 atomic_t s_lock_busy;
44715
44716 /* locality groups */
44717diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44718index cb990b2..4820141 100644
44719--- a/fs/ext4/mballoc.c
44720+++ b/fs/ext4/mballoc.c
44721@@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44722 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44723
44724 if (EXT4_SB(sb)->s_mb_stats)
44725- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44726+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44727
44728 break;
44729 }
44730@@ -2088,7 +2088,7 @@ repeat:
44731 ac->ac_status = AC_STATUS_CONTINUE;
44732 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44733 cr = 3;
44734- atomic_inc(&sbi->s_mb_lost_chunks);
44735+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44736 goto repeat;
44737 }
44738 }
44739@@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
44740 if (sbi->s_mb_stats) {
44741 ext4_msg(sb, KERN_INFO,
44742 "mballoc: %u blocks %u reqs (%u success)",
44743- atomic_read(&sbi->s_bal_allocated),
44744- atomic_read(&sbi->s_bal_reqs),
44745- atomic_read(&sbi->s_bal_success));
44746+ atomic_read_unchecked(&sbi->s_bal_allocated),
44747+ atomic_read_unchecked(&sbi->s_bal_reqs),
44748+ atomic_read_unchecked(&sbi->s_bal_success));
44749 ext4_msg(sb, KERN_INFO,
44750 "mballoc: %u extents scanned, %u goal hits, "
44751 "%u 2^N hits, %u breaks, %u lost",
44752- atomic_read(&sbi->s_bal_ex_scanned),
44753- atomic_read(&sbi->s_bal_goals),
44754- atomic_read(&sbi->s_bal_2orders),
44755- atomic_read(&sbi->s_bal_breaks),
44756- atomic_read(&sbi->s_mb_lost_chunks));
44757+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44758+ atomic_read_unchecked(&sbi->s_bal_goals),
44759+ atomic_read_unchecked(&sbi->s_bal_2orders),
44760+ atomic_read_unchecked(&sbi->s_bal_breaks),
44761+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44762 ext4_msg(sb, KERN_INFO,
44763 "mballoc: %lu generated and it took %Lu",
44764 sbi->s_mb_buddies_generated,
44765 sbi->s_mb_generation_time);
44766 ext4_msg(sb, KERN_INFO,
44767 "mballoc: %u preallocated, %u discarded",
44768- atomic_read(&sbi->s_mb_preallocated),
44769- atomic_read(&sbi->s_mb_discarded));
44770+ atomic_read_unchecked(&sbi->s_mb_preallocated),
44771+ atomic_read_unchecked(&sbi->s_mb_discarded));
44772 }
44773
44774 free_percpu(sbi->s_locality_groups);
44775@@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44776 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44777
44778 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44779- atomic_inc(&sbi->s_bal_reqs);
44780- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44781+ atomic_inc_unchecked(&sbi->s_bal_reqs);
44782+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44783 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44784- atomic_inc(&sbi->s_bal_success);
44785- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44786+ atomic_inc_unchecked(&sbi->s_bal_success);
44787+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44788 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44789 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44790- atomic_inc(&sbi->s_bal_goals);
44791+ atomic_inc_unchecked(&sbi->s_bal_goals);
44792 if (ac->ac_found > sbi->s_mb_max_to_scan)
44793- atomic_inc(&sbi->s_bal_breaks);
44794+ atomic_inc_unchecked(&sbi->s_bal_breaks);
44795 }
44796
44797 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44798@@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44799 trace_ext4_mb_new_inode_pa(ac, pa);
44800
44801 ext4_mb_use_inode_pa(ac, pa);
44802- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
44803+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
44804
44805 ei = EXT4_I(ac->ac_inode);
44806 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44807@@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44808 trace_ext4_mb_new_group_pa(ac, pa);
44809
44810 ext4_mb_use_group_pa(ac, pa);
44811- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44812+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44813
44814 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44815 lg = ac->ac_lg;
44816@@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44817 * from the bitmap and continue.
44818 */
44819 }
44820- atomic_add(free, &sbi->s_mb_discarded);
44821+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
44822
44823 return err;
44824 }
44825@@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44826 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44827 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44828 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44829- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44830+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44831 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44832
44833 return 0;
44834diff --git a/fs/fcntl.c b/fs/fcntl.c
44835index 22764c7..86372c9 100644
44836--- a/fs/fcntl.c
44837+++ b/fs/fcntl.c
44838@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44839 if (err)
44840 return err;
44841
44842+ if (gr_handle_chroot_fowner(pid, type))
44843+ return -ENOENT;
44844+ if (gr_check_protected_task_fowner(pid, type))
44845+ return -EACCES;
44846+
44847 f_modown(filp, pid, type, force);
44848 return 0;
44849 }
44850@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44851
44852 static int f_setown_ex(struct file *filp, unsigned long arg)
44853 {
44854- struct f_owner_ex * __user owner_p = (void * __user)arg;
44855+ struct f_owner_ex __user *owner_p = (void __user *)arg;
44856 struct f_owner_ex owner;
44857 struct pid *pid;
44858 int type;
44859@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44860
44861 static int f_getown_ex(struct file *filp, unsigned long arg)
44862 {
44863- struct f_owner_ex * __user owner_p = (void * __user)arg;
44864+ struct f_owner_ex __user *owner_p = (void __user *)arg;
44865 struct f_owner_ex owner;
44866 int ret = 0;
44867
44868@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44869 switch (cmd) {
44870 case F_DUPFD:
44871 case F_DUPFD_CLOEXEC:
44872+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44873 if (arg >= rlimit(RLIMIT_NOFILE))
44874 break;
44875 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44876diff --git a/fs/fifo.c b/fs/fifo.c
44877index b1a524d..4ee270e 100644
44878--- a/fs/fifo.c
44879+++ b/fs/fifo.c
44880@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44881 */
44882 filp->f_op = &read_pipefifo_fops;
44883 pipe->r_counter++;
44884- if (pipe->readers++ == 0)
44885+ if (atomic_inc_return(&pipe->readers) == 1)
44886 wake_up_partner(inode);
44887
44888- if (!pipe->writers) {
44889+ if (!atomic_read(&pipe->writers)) {
44890 if ((filp->f_flags & O_NONBLOCK)) {
44891 /* suppress POLLHUP until we have
44892 * seen a writer */
44893@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44894 * errno=ENXIO when there is no process reading the FIFO.
44895 */
44896 ret = -ENXIO;
44897- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44898+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44899 goto err;
44900
44901 filp->f_op = &write_pipefifo_fops;
44902 pipe->w_counter++;
44903- if (!pipe->writers++)
44904+ if (atomic_inc_return(&pipe->writers) == 1)
44905 wake_up_partner(inode);
44906
44907- if (!pipe->readers) {
44908+ if (!atomic_read(&pipe->readers)) {
44909 wait_for_partner(inode, &pipe->r_counter);
44910 if (signal_pending(current))
44911 goto err_wr;
44912@@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44913 */
44914 filp->f_op = &rdwr_pipefifo_fops;
44915
44916- pipe->readers++;
44917- pipe->writers++;
44918+ atomic_inc(&pipe->readers);
44919+ atomic_inc(&pipe->writers);
44920 pipe->r_counter++;
44921 pipe->w_counter++;
44922- if (pipe->readers == 1 || pipe->writers == 1)
44923+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44924 wake_up_partner(inode);
44925 break;
44926
44927@@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44928 return 0;
44929
44930 err_rd:
44931- if (!--pipe->readers)
44932+ if (atomic_dec_and_test(&pipe->readers))
44933 wake_up_interruptible(&pipe->wait);
44934 ret = -ERESTARTSYS;
44935 goto err;
44936
44937 err_wr:
44938- if (!--pipe->writers)
44939+ if (atomic_dec_and_test(&pipe->writers))
44940 wake_up_interruptible(&pipe->wait);
44941 ret = -ERESTARTSYS;
44942 goto err;
44943
44944 err:
44945- if (!pipe->readers && !pipe->writers)
44946+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44947 free_pipe_info(inode);
44948
44949 err_nocleanup:
44950diff --git a/fs/file.c b/fs/file.c
44951index 4c6992d..104cdea 100644
44952--- a/fs/file.c
44953+++ b/fs/file.c
44954@@ -15,6 +15,7 @@
44955 #include <linux/slab.h>
44956 #include <linux/vmalloc.h>
44957 #include <linux/file.h>
44958+#include <linux/security.h>
44959 #include <linux/fdtable.h>
44960 #include <linux/bitops.h>
44961 #include <linux/interrupt.h>
44962@@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
44963 * N.B. For clone tasks sharing a files structure, this test
44964 * will limit the total number of files that can be opened.
44965 */
44966+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44967 if (nr >= rlimit(RLIMIT_NOFILE))
44968 return -EMFILE;
44969
44970diff --git a/fs/filesystems.c b/fs/filesystems.c
44971index 96f2428..f5eeb8e 100644
44972--- a/fs/filesystems.c
44973+++ b/fs/filesystems.c
44974@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
44975 int len = dot ? dot - name : strlen(name);
44976
44977 fs = __get_fs_type(name, len);
44978+
44979+#ifdef CONFIG_GRKERNSEC_MODHARDEN
44980+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44981+#else
44982 if (!fs && (request_module("%.*s", len, name) == 0))
44983+#endif
44984 fs = __get_fs_type(name, len);
44985
44986 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44987diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44988index 78b519c..a8b4979 100644
44989--- a/fs/fs_struct.c
44990+++ b/fs/fs_struct.c
44991@@ -4,6 +4,7 @@
44992 #include <linux/path.h>
44993 #include <linux/slab.h>
44994 #include <linux/fs_struct.h>
44995+#include <linux/grsecurity.h>
44996 #include "internal.h"
44997
44998 static inline void path_get_longterm(struct path *path)
44999@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
45000 old_root = fs->root;
45001 fs->root = *path;
45002 path_get_longterm(path);
45003+ gr_set_chroot_entries(current, path);
45004 write_seqcount_end(&fs->seq);
45005 spin_unlock(&fs->lock);
45006 if (old_root.dentry)
45007@@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
45008 && fs->root.mnt == old_root->mnt) {
45009 path_get_longterm(new_root);
45010 fs->root = *new_root;
45011+ gr_set_chroot_entries(p, new_root);
45012 count++;
45013 }
45014 if (fs->pwd.dentry == old_root->dentry
45015@@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
45016 spin_lock(&fs->lock);
45017 write_seqcount_begin(&fs->seq);
45018 tsk->fs = NULL;
45019- kill = !--fs->users;
45020+ gr_clear_chroot_entries(tsk);
45021+ kill = !atomic_dec_return(&fs->users);
45022 write_seqcount_end(&fs->seq);
45023 spin_unlock(&fs->lock);
45024 task_unlock(tsk);
45025@@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45026 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
45027 /* We don't need to lock fs - think why ;-) */
45028 if (fs) {
45029- fs->users = 1;
45030+ atomic_set(&fs->users, 1);
45031 fs->in_exec = 0;
45032 spin_lock_init(&fs->lock);
45033 seqcount_init(&fs->seq);
45034@@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45035 spin_lock(&old->lock);
45036 fs->root = old->root;
45037 path_get_longterm(&fs->root);
45038+ /* instead of calling gr_set_chroot_entries here,
45039+ we call it from every caller of this function
45040+ */
45041 fs->pwd = old->pwd;
45042 path_get_longterm(&fs->pwd);
45043 spin_unlock(&old->lock);
45044@@ -150,8 +157,9 @@ int unshare_fs_struct(void)
45045
45046 task_lock(current);
45047 spin_lock(&fs->lock);
45048- kill = !--fs->users;
45049+ kill = !atomic_dec_return(&fs->users);
45050 current->fs = new_fs;
45051+ gr_set_chroot_entries(current, &new_fs->root);
45052 spin_unlock(&fs->lock);
45053 task_unlock(current);
45054
45055@@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
45056
45057 int current_umask(void)
45058 {
45059- return current->fs->umask;
45060+ return current->fs->umask | gr_acl_umask();
45061 }
45062 EXPORT_SYMBOL(current_umask);
45063
45064 /* to be mentioned only in INIT_TASK */
45065 struct fs_struct init_fs = {
45066- .users = 1,
45067+ .users = ATOMIC_INIT(1),
45068 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
45069 .seq = SEQCNT_ZERO,
45070 .umask = 0022,
45071@@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
45072 task_lock(current);
45073
45074 spin_lock(&init_fs.lock);
45075- init_fs.users++;
45076+ atomic_inc(&init_fs.users);
45077 spin_unlock(&init_fs.lock);
45078
45079 spin_lock(&fs->lock);
45080 current->fs = &init_fs;
45081- kill = !--fs->users;
45082+ gr_set_chroot_entries(current, &current->fs->root);
45083+ kill = !atomic_dec_return(&fs->users);
45084 spin_unlock(&fs->lock);
45085
45086 task_unlock(current);
45087diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
45088index 9905350..02eaec4 100644
45089--- a/fs/fscache/cookie.c
45090+++ b/fs/fscache/cookie.c
45091@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
45092 parent ? (char *) parent->def->name : "<no-parent>",
45093 def->name, netfs_data);
45094
45095- fscache_stat(&fscache_n_acquires);
45096+ fscache_stat_unchecked(&fscache_n_acquires);
45097
45098 /* if there's no parent cookie, then we don't create one here either */
45099 if (!parent) {
45100- fscache_stat(&fscache_n_acquires_null);
45101+ fscache_stat_unchecked(&fscache_n_acquires_null);
45102 _leave(" [no parent]");
45103 return NULL;
45104 }
45105@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
45106 /* allocate and initialise a cookie */
45107 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
45108 if (!cookie) {
45109- fscache_stat(&fscache_n_acquires_oom);
45110+ fscache_stat_unchecked(&fscache_n_acquires_oom);
45111 _leave(" [ENOMEM]");
45112 return NULL;
45113 }
45114@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45115
45116 switch (cookie->def->type) {
45117 case FSCACHE_COOKIE_TYPE_INDEX:
45118- fscache_stat(&fscache_n_cookie_index);
45119+ fscache_stat_unchecked(&fscache_n_cookie_index);
45120 break;
45121 case FSCACHE_COOKIE_TYPE_DATAFILE:
45122- fscache_stat(&fscache_n_cookie_data);
45123+ fscache_stat_unchecked(&fscache_n_cookie_data);
45124 break;
45125 default:
45126- fscache_stat(&fscache_n_cookie_special);
45127+ fscache_stat_unchecked(&fscache_n_cookie_special);
45128 break;
45129 }
45130
45131@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45132 if (fscache_acquire_non_index_cookie(cookie) < 0) {
45133 atomic_dec(&parent->n_children);
45134 __fscache_cookie_put(cookie);
45135- fscache_stat(&fscache_n_acquires_nobufs);
45136+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
45137 _leave(" = NULL");
45138 return NULL;
45139 }
45140 }
45141
45142- fscache_stat(&fscache_n_acquires_ok);
45143+ fscache_stat_unchecked(&fscache_n_acquires_ok);
45144 _leave(" = %p", cookie);
45145 return cookie;
45146 }
45147@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
45148 cache = fscache_select_cache_for_object(cookie->parent);
45149 if (!cache) {
45150 up_read(&fscache_addremove_sem);
45151- fscache_stat(&fscache_n_acquires_no_cache);
45152+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
45153 _leave(" = -ENOMEDIUM [no cache]");
45154 return -ENOMEDIUM;
45155 }
45156@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
45157 object = cache->ops->alloc_object(cache, cookie);
45158 fscache_stat_d(&fscache_n_cop_alloc_object);
45159 if (IS_ERR(object)) {
45160- fscache_stat(&fscache_n_object_no_alloc);
45161+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
45162 ret = PTR_ERR(object);
45163 goto error;
45164 }
45165
45166- fscache_stat(&fscache_n_object_alloc);
45167+ fscache_stat_unchecked(&fscache_n_object_alloc);
45168
45169 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
45170
45171@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
45172 struct fscache_object *object;
45173 struct hlist_node *_p;
45174
45175- fscache_stat(&fscache_n_updates);
45176+ fscache_stat_unchecked(&fscache_n_updates);
45177
45178 if (!cookie) {
45179- fscache_stat(&fscache_n_updates_null);
45180+ fscache_stat_unchecked(&fscache_n_updates_null);
45181 _leave(" [no cookie]");
45182 return;
45183 }
45184@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45185 struct fscache_object *object;
45186 unsigned long event;
45187
45188- fscache_stat(&fscache_n_relinquishes);
45189+ fscache_stat_unchecked(&fscache_n_relinquishes);
45190 if (retire)
45191- fscache_stat(&fscache_n_relinquishes_retire);
45192+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
45193
45194 if (!cookie) {
45195- fscache_stat(&fscache_n_relinquishes_null);
45196+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
45197 _leave(" [no cookie]");
45198 return;
45199 }
45200@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45201
45202 /* wait for the cookie to finish being instantiated (or to fail) */
45203 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
45204- fscache_stat(&fscache_n_relinquishes_waitcrt);
45205+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
45206 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
45207 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
45208 }
45209diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
45210index f6aad48..88dcf26 100644
45211--- a/fs/fscache/internal.h
45212+++ b/fs/fscache/internal.h
45213@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
45214 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
45215 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
45216
45217-extern atomic_t fscache_n_op_pend;
45218-extern atomic_t fscache_n_op_run;
45219-extern atomic_t fscache_n_op_enqueue;
45220-extern atomic_t fscache_n_op_deferred_release;
45221-extern atomic_t fscache_n_op_release;
45222-extern atomic_t fscache_n_op_gc;
45223-extern atomic_t fscache_n_op_cancelled;
45224-extern atomic_t fscache_n_op_rejected;
45225+extern atomic_unchecked_t fscache_n_op_pend;
45226+extern atomic_unchecked_t fscache_n_op_run;
45227+extern atomic_unchecked_t fscache_n_op_enqueue;
45228+extern atomic_unchecked_t fscache_n_op_deferred_release;
45229+extern atomic_unchecked_t fscache_n_op_release;
45230+extern atomic_unchecked_t fscache_n_op_gc;
45231+extern atomic_unchecked_t fscache_n_op_cancelled;
45232+extern atomic_unchecked_t fscache_n_op_rejected;
45233
45234-extern atomic_t fscache_n_attr_changed;
45235-extern atomic_t fscache_n_attr_changed_ok;
45236-extern atomic_t fscache_n_attr_changed_nobufs;
45237-extern atomic_t fscache_n_attr_changed_nomem;
45238-extern atomic_t fscache_n_attr_changed_calls;
45239+extern atomic_unchecked_t fscache_n_attr_changed;
45240+extern atomic_unchecked_t fscache_n_attr_changed_ok;
45241+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
45242+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
45243+extern atomic_unchecked_t fscache_n_attr_changed_calls;
45244
45245-extern atomic_t fscache_n_allocs;
45246-extern atomic_t fscache_n_allocs_ok;
45247-extern atomic_t fscache_n_allocs_wait;
45248-extern atomic_t fscache_n_allocs_nobufs;
45249-extern atomic_t fscache_n_allocs_intr;
45250-extern atomic_t fscache_n_allocs_object_dead;
45251-extern atomic_t fscache_n_alloc_ops;
45252-extern atomic_t fscache_n_alloc_op_waits;
45253+extern atomic_unchecked_t fscache_n_allocs;
45254+extern atomic_unchecked_t fscache_n_allocs_ok;
45255+extern atomic_unchecked_t fscache_n_allocs_wait;
45256+extern atomic_unchecked_t fscache_n_allocs_nobufs;
45257+extern atomic_unchecked_t fscache_n_allocs_intr;
45258+extern atomic_unchecked_t fscache_n_allocs_object_dead;
45259+extern atomic_unchecked_t fscache_n_alloc_ops;
45260+extern atomic_unchecked_t fscache_n_alloc_op_waits;
45261
45262-extern atomic_t fscache_n_retrievals;
45263-extern atomic_t fscache_n_retrievals_ok;
45264-extern atomic_t fscache_n_retrievals_wait;
45265-extern atomic_t fscache_n_retrievals_nodata;
45266-extern atomic_t fscache_n_retrievals_nobufs;
45267-extern atomic_t fscache_n_retrievals_intr;
45268-extern atomic_t fscache_n_retrievals_nomem;
45269-extern atomic_t fscache_n_retrievals_object_dead;
45270-extern atomic_t fscache_n_retrieval_ops;
45271-extern atomic_t fscache_n_retrieval_op_waits;
45272+extern atomic_unchecked_t fscache_n_retrievals;
45273+extern atomic_unchecked_t fscache_n_retrievals_ok;
45274+extern atomic_unchecked_t fscache_n_retrievals_wait;
45275+extern atomic_unchecked_t fscache_n_retrievals_nodata;
45276+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
45277+extern atomic_unchecked_t fscache_n_retrievals_intr;
45278+extern atomic_unchecked_t fscache_n_retrievals_nomem;
45279+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
45280+extern atomic_unchecked_t fscache_n_retrieval_ops;
45281+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
45282
45283-extern atomic_t fscache_n_stores;
45284-extern atomic_t fscache_n_stores_ok;
45285-extern atomic_t fscache_n_stores_again;
45286-extern atomic_t fscache_n_stores_nobufs;
45287-extern atomic_t fscache_n_stores_oom;
45288-extern atomic_t fscache_n_store_ops;
45289-extern atomic_t fscache_n_store_calls;
45290-extern atomic_t fscache_n_store_pages;
45291-extern atomic_t fscache_n_store_radix_deletes;
45292-extern atomic_t fscache_n_store_pages_over_limit;
45293+extern atomic_unchecked_t fscache_n_stores;
45294+extern atomic_unchecked_t fscache_n_stores_ok;
45295+extern atomic_unchecked_t fscache_n_stores_again;
45296+extern atomic_unchecked_t fscache_n_stores_nobufs;
45297+extern atomic_unchecked_t fscache_n_stores_oom;
45298+extern atomic_unchecked_t fscache_n_store_ops;
45299+extern atomic_unchecked_t fscache_n_store_calls;
45300+extern atomic_unchecked_t fscache_n_store_pages;
45301+extern atomic_unchecked_t fscache_n_store_radix_deletes;
45302+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
45303
45304-extern atomic_t fscache_n_store_vmscan_not_storing;
45305-extern atomic_t fscache_n_store_vmscan_gone;
45306-extern atomic_t fscache_n_store_vmscan_busy;
45307-extern atomic_t fscache_n_store_vmscan_cancelled;
45308+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45309+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
45310+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
45311+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45312
45313-extern atomic_t fscache_n_marks;
45314-extern atomic_t fscache_n_uncaches;
45315+extern atomic_unchecked_t fscache_n_marks;
45316+extern atomic_unchecked_t fscache_n_uncaches;
45317
45318-extern atomic_t fscache_n_acquires;
45319-extern atomic_t fscache_n_acquires_null;
45320-extern atomic_t fscache_n_acquires_no_cache;
45321-extern atomic_t fscache_n_acquires_ok;
45322-extern atomic_t fscache_n_acquires_nobufs;
45323-extern atomic_t fscache_n_acquires_oom;
45324+extern atomic_unchecked_t fscache_n_acquires;
45325+extern atomic_unchecked_t fscache_n_acquires_null;
45326+extern atomic_unchecked_t fscache_n_acquires_no_cache;
45327+extern atomic_unchecked_t fscache_n_acquires_ok;
45328+extern atomic_unchecked_t fscache_n_acquires_nobufs;
45329+extern atomic_unchecked_t fscache_n_acquires_oom;
45330
45331-extern atomic_t fscache_n_updates;
45332-extern atomic_t fscache_n_updates_null;
45333-extern atomic_t fscache_n_updates_run;
45334+extern atomic_unchecked_t fscache_n_updates;
45335+extern atomic_unchecked_t fscache_n_updates_null;
45336+extern atomic_unchecked_t fscache_n_updates_run;
45337
45338-extern atomic_t fscache_n_relinquishes;
45339-extern atomic_t fscache_n_relinquishes_null;
45340-extern atomic_t fscache_n_relinquishes_waitcrt;
45341-extern atomic_t fscache_n_relinquishes_retire;
45342+extern atomic_unchecked_t fscache_n_relinquishes;
45343+extern atomic_unchecked_t fscache_n_relinquishes_null;
45344+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45345+extern atomic_unchecked_t fscache_n_relinquishes_retire;
45346
45347-extern atomic_t fscache_n_cookie_index;
45348-extern atomic_t fscache_n_cookie_data;
45349-extern atomic_t fscache_n_cookie_special;
45350+extern atomic_unchecked_t fscache_n_cookie_index;
45351+extern atomic_unchecked_t fscache_n_cookie_data;
45352+extern atomic_unchecked_t fscache_n_cookie_special;
45353
45354-extern atomic_t fscache_n_object_alloc;
45355-extern atomic_t fscache_n_object_no_alloc;
45356-extern atomic_t fscache_n_object_lookups;
45357-extern atomic_t fscache_n_object_lookups_negative;
45358-extern atomic_t fscache_n_object_lookups_positive;
45359-extern atomic_t fscache_n_object_lookups_timed_out;
45360-extern atomic_t fscache_n_object_created;
45361-extern atomic_t fscache_n_object_avail;
45362-extern atomic_t fscache_n_object_dead;
45363+extern atomic_unchecked_t fscache_n_object_alloc;
45364+extern atomic_unchecked_t fscache_n_object_no_alloc;
45365+extern atomic_unchecked_t fscache_n_object_lookups;
45366+extern atomic_unchecked_t fscache_n_object_lookups_negative;
45367+extern atomic_unchecked_t fscache_n_object_lookups_positive;
45368+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
45369+extern atomic_unchecked_t fscache_n_object_created;
45370+extern atomic_unchecked_t fscache_n_object_avail;
45371+extern atomic_unchecked_t fscache_n_object_dead;
45372
45373-extern atomic_t fscache_n_checkaux_none;
45374-extern atomic_t fscache_n_checkaux_okay;
45375-extern atomic_t fscache_n_checkaux_update;
45376-extern atomic_t fscache_n_checkaux_obsolete;
45377+extern atomic_unchecked_t fscache_n_checkaux_none;
45378+extern atomic_unchecked_t fscache_n_checkaux_okay;
45379+extern atomic_unchecked_t fscache_n_checkaux_update;
45380+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
45381
45382 extern atomic_t fscache_n_cop_alloc_object;
45383 extern atomic_t fscache_n_cop_lookup_object;
45384@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
45385 atomic_inc(stat);
45386 }
45387
45388+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
45389+{
45390+ atomic_inc_unchecked(stat);
45391+}
45392+
45393 static inline void fscache_stat_d(atomic_t *stat)
45394 {
45395 atomic_dec(stat);
45396@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
45397
45398 #define __fscache_stat(stat) (NULL)
45399 #define fscache_stat(stat) do {} while (0)
45400+#define fscache_stat_unchecked(stat) do {} while (0)
45401 #define fscache_stat_d(stat) do {} while (0)
45402 #endif
45403
45404diff --git a/fs/fscache/object.c b/fs/fscache/object.c
45405index b6b897c..0ffff9c 100644
45406--- a/fs/fscache/object.c
45407+++ b/fs/fscache/object.c
45408@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45409 /* update the object metadata on disk */
45410 case FSCACHE_OBJECT_UPDATING:
45411 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
45412- fscache_stat(&fscache_n_updates_run);
45413+ fscache_stat_unchecked(&fscache_n_updates_run);
45414 fscache_stat(&fscache_n_cop_update_object);
45415 object->cache->ops->update_object(object);
45416 fscache_stat_d(&fscache_n_cop_update_object);
45417@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45418 spin_lock(&object->lock);
45419 object->state = FSCACHE_OBJECT_DEAD;
45420 spin_unlock(&object->lock);
45421- fscache_stat(&fscache_n_object_dead);
45422+ fscache_stat_unchecked(&fscache_n_object_dead);
45423 goto terminal_transit;
45424
45425 /* handle the parent cache of this object being withdrawn from
45426@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45427 spin_lock(&object->lock);
45428 object->state = FSCACHE_OBJECT_DEAD;
45429 spin_unlock(&object->lock);
45430- fscache_stat(&fscache_n_object_dead);
45431+ fscache_stat_unchecked(&fscache_n_object_dead);
45432 goto terminal_transit;
45433
45434 /* complain about the object being woken up once it is
45435@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45436 parent->cookie->def->name, cookie->def->name,
45437 object->cache->tag->name);
45438
45439- fscache_stat(&fscache_n_object_lookups);
45440+ fscache_stat_unchecked(&fscache_n_object_lookups);
45441 fscache_stat(&fscache_n_cop_lookup_object);
45442 ret = object->cache->ops->lookup_object(object);
45443 fscache_stat_d(&fscache_n_cop_lookup_object);
45444@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45445 if (ret == -ETIMEDOUT) {
45446 /* probably stuck behind another object, so move this one to
45447 * the back of the queue */
45448- fscache_stat(&fscache_n_object_lookups_timed_out);
45449+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
45450 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45451 }
45452
45453@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
45454
45455 spin_lock(&object->lock);
45456 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45457- fscache_stat(&fscache_n_object_lookups_negative);
45458+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
45459
45460 /* transit here to allow write requests to begin stacking up
45461 * and read requests to begin returning ENODATA */
45462@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
45463 * result, in which case there may be data available */
45464 spin_lock(&object->lock);
45465 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45466- fscache_stat(&fscache_n_object_lookups_positive);
45467+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45468
45469 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45470
45471@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
45472 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45473 } else {
45474 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45475- fscache_stat(&fscache_n_object_created);
45476+ fscache_stat_unchecked(&fscache_n_object_created);
45477
45478 object->state = FSCACHE_OBJECT_AVAILABLE;
45479 spin_unlock(&object->lock);
45480@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
45481 fscache_enqueue_dependents(object);
45482
45483 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45484- fscache_stat(&fscache_n_object_avail);
45485+ fscache_stat_unchecked(&fscache_n_object_avail);
45486
45487 _leave("");
45488 }
45489@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45490 enum fscache_checkaux result;
45491
45492 if (!object->cookie->def->check_aux) {
45493- fscache_stat(&fscache_n_checkaux_none);
45494+ fscache_stat_unchecked(&fscache_n_checkaux_none);
45495 return FSCACHE_CHECKAUX_OKAY;
45496 }
45497
45498@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45499 switch (result) {
45500 /* entry okay as is */
45501 case FSCACHE_CHECKAUX_OKAY:
45502- fscache_stat(&fscache_n_checkaux_okay);
45503+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
45504 break;
45505
45506 /* entry requires update */
45507 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45508- fscache_stat(&fscache_n_checkaux_update);
45509+ fscache_stat_unchecked(&fscache_n_checkaux_update);
45510 break;
45511
45512 /* entry requires deletion */
45513 case FSCACHE_CHECKAUX_OBSOLETE:
45514- fscache_stat(&fscache_n_checkaux_obsolete);
45515+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45516 break;
45517
45518 default:
45519diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45520index 30afdfa..2256596 100644
45521--- a/fs/fscache/operation.c
45522+++ b/fs/fscache/operation.c
45523@@ -17,7 +17,7 @@
45524 #include <linux/slab.h>
45525 #include "internal.h"
45526
45527-atomic_t fscache_op_debug_id;
45528+atomic_unchecked_t fscache_op_debug_id;
45529 EXPORT_SYMBOL(fscache_op_debug_id);
45530
45531 /**
45532@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
45533 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45534 ASSERTCMP(atomic_read(&op->usage), >, 0);
45535
45536- fscache_stat(&fscache_n_op_enqueue);
45537+ fscache_stat_unchecked(&fscache_n_op_enqueue);
45538 switch (op->flags & FSCACHE_OP_TYPE) {
45539 case FSCACHE_OP_ASYNC:
45540 _debug("queue async");
45541@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45542 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45543 if (op->processor)
45544 fscache_enqueue_operation(op);
45545- fscache_stat(&fscache_n_op_run);
45546+ fscache_stat_unchecked(&fscache_n_op_run);
45547 }
45548
45549 /*
45550@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45551 if (object->n_ops > 1) {
45552 atomic_inc(&op->usage);
45553 list_add_tail(&op->pend_link, &object->pending_ops);
45554- fscache_stat(&fscache_n_op_pend);
45555+ fscache_stat_unchecked(&fscache_n_op_pend);
45556 } else if (!list_empty(&object->pending_ops)) {
45557 atomic_inc(&op->usage);
45558 list_add_tail(&op->pend_link, &object->pending_ops);
45559- fscache_stat(&fscache_n_op_pend);
45560+ fscache_stat_unchecked(&fscache_n_op_pend);
45561 fscache_start_operations(object);
45562 } else {
45563 ASSERTCMP(object->n_in_progress, ==, 0);
45564@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45565 object->n_exclusive++; /* reads and writes must wait */
45566 atomic_inc(&op->usage);
45567 list_add_tail(&op->pend_link, &object->pending_ops);
45568- fscache_stat(&fscache_n_op_pend);
45569+ fscache_stat_unchecked(&fscache_n_op_pend);
45570 ret = 0;
45571 } else {
45572 /* not allowed to submit ops in any other state */
45573@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45574 if (object->n_exclusive > 0) {
45575 atomic_inc(&op->usage);
45576 list_add_tail(&op->pend_link, &object->pending_ops);
45577- fscache_stat(&fscache_n_op_pend);
45578+ fscache_stat_unchecked(&fscache_n_op_pend);
45579 } else if (!list_empty(&object->pending_ops)) {
45580 atomic_inc(&op->usage);
45581 list_add_tail(&op->pend_link, &object->pending_ops);
45582- fscache_stat(&fscache_n_op_pend);
45583+ fscache_stat_unchecked(&fscache_n_op_pend);
45584 fscache_start_operations(object);
45585 } else {
45586 ASSERTCMP(object->n_exclusive, ==, 0);
45587@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45588 object->n_ops++;
45589 atomic_inc(&op->usage);
45590 list_add_tail(&op->pend_link, &object->pending_ops);
45591- fscache_stat(&fscache_n_op_pend);
45592+ fscache_stat_unchecked(&fscache_n_op_pend);
45593 ret = 0;
45594 } else if (object->state == FSCACHE_OBJECT_DYING ||
45595 object->state == FSCACHE_OBJECT_LC_DYING ||
45596 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45597- fscache_stat(&fscache_n_op_rejected);
45598+ fscache_stat_unchecked(&fscache_n_op_rejected);
45599 ret = -ENOBUFS;
45600 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45601 fscache_report_unexpected_submission(object, op, ostate);
45602@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45603
45604 ret = -EBUSY;
45605 if (!list_empty(&op->pend_link)) {
45606- fscache_stat(&fscache_n_op_cancelled);
45607+ fscache_stat_unchecked(&fscache_n_op_cancelled);
45608 list_del_init(&op->pend_link);
45609 object->n_ops--;
45610 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45611@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45612 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45613 BUG();
45614
45615- fscache_stat(&fscache_n_op_release);
45616+ fscache_stat_unchecked(&fscache_n_op_release);
45617
45618 if (op->release) {
45619 op->release(op);
45620@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45621 * lock, and defer it otherwise */
45622 if (!spin_trylock(&object->lock)) {
45623 _debug("defer put");
45624- fscache_stat(&fscache_n_op_deferred_release);
45625+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
45626
45627 cache = object->cache;
45628 spin_lock(&cache->op_gc_list_lock);
45629@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45630
45631 _debug("GC DEFERRED REL OBJ%x OP%x",
45632 object->debug_id, op->debug_id);
45633- fscache_stat(&fscache_n_op_gc);
45634+ fscache_stat_unchecked(&fscache_n_op_gc);
45635
45636 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45637
45638diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45639index 3f7a59b..cf196cc 100644
45640--- a/fs/fscache/page.c
45641+++ b/fs/fscache/page.c
45642@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45643 val = radix_tree_lookup(&cookie->stores, page->index);
45644 if (!val) {
45645 rcu_read_unlock();
45646- fscache_stat(&fscache_n_store_vmscan_not_storing);
45647+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45648 __fscache_uncache_page(cookie, page);
45649 return true;
45650 }
45651@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45652 spin_unlock(&cookie->stores_lock);
45653
45654 if (xpage) {
45655- fscache_stat(&fscache_n_store_vmscan_cancelled);
45656- fscache_stat(&fscache_n_store_radix_deletes);
45657+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45658+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45659 ASSERTCMP(xpage, ==, page);
45660 } else {
45661- fscache_stat(&fscache_n_store_vmscan_gone);
45662+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45663 }
45664
45665 wake_up_bit(&cookie->flags, 0);
45666@@ -107,7 +107,7 @@ page_busy:
45667 /* we might want to wait here, but that could deadlock the allocator as
45668 * the work threads writing to the cache may all end up sleeping
45669 * on memory allocation */
45670- fscache_stat(&fscache_n_store_vmscan_busy);
45671+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45672 return false;
45673 }
45674 EXPORT_SYMBOL(__fscache_maybe_release_page);
45675@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45676 FSCACHE_COOKIE_STORING_TAG);
45677 if (!radix_tree_tag_get(&cookie->stores, page->index,
45678 FSCACHE_COOKIE_PENDING_TAG)) {
45679- fscache_stat(&fscache_n_store_radix_deletes);
45680+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45681 xpage = radix_tree_delete(&cookie->stores, page->index);
45682 }
45683 spin_unlock(&cookie->stores_lock);
45684@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45685
45686 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45687
45688- fscache_stat(&fscache_n_attr_changed_calls);
45689+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45690
45691 if (fscache_object_is_active(object)) {
45692 fscache_stat(&fscache_n_cop_attr_changed);
45693@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45694
45695 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45696
45697- fscache_stat(&fscache_n_attr_changed);
45698+ fscache_stat_unchecked(&fscache_n_attr_changed);
45699
45700 op = kzalloc(sizeof(*op), GFP_KERNEL);
45701 if (!op) {
45702- fscache_stat(&fscache_n_attr_changed_nomem);
45703+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45704 _leave(" = -ENOMEM");
45705 return -ENOMEM;
45706 }
45707@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45708 if (fscache_submit_exclusive_op(object, op) < 0)
45709 goto nobufs;
45710 spin_unlock(&cookie->lock);
45711- fscache_stat(&fscache_n_attr_changed_ok);
45712+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45713 fscache_put_operation(op);
45714 _leave(" = 0");
45715 return 0;
45716@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45717 nobufs:
45718 spin_unlock(&cookie->lock);
45719 kfree(op);
45720- fscache_stat(&fscache_n_attr_changed_nobufs);
45721+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45722 _leave(" = %d", -ENOBUFS);
45723 return -ENOBUFS;
45724 }
45725@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45726 /* allocate a retrieval operation and attempt to submit it */
45727 op = kzalloc(sizeof(*op), GFP_NOIO);
45728 if (!op) {
45729- fscache_stat(&fscache_n_retrievals_nomem);
45730+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45731 return NULL;
45732 }
45733
45734@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45735 return 0;
45736 }
45737
45738- fscache_stat(&fscache_n_retrievals_wait);
45739+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
45740
45741 jif = jiffies;
45742 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45743 fscache_wait_bit_interruptible,
45744 TASK_INTERRUPTIBLE) != 0) {
45745- fscache_stat(&fscache_n_retrievals_intr);
45746+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45747 _leave(" = -ERESTARTSYS");
45748 return -ERESTARTSYS;
45749 }
45750@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45751 */
45752 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45753 struct fscache_retrieval *op,
45754- atomic_t *stat_op_waits,
45755- atomic_t *stat_object_dead)
45756+ atomic_unchecked_t *stat_op_waits,
45757+ atomic_unchecked_t *stat_object_dead)
45758 {
45759 int ret;
45760
45761@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45762 goto check_if_dead;
45763
45764 _debug(">>> WT");
45765- fscache_stat(stat_op_waits);
45766+ fscache_stat_unchecked(stat_op_waits);
45767 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45768 fscache_wait_bit_interruptible,
45769 TASK_INTERRUPTIBLE) < 0) {
45770@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45771
45772 check_if_dead:
45773 if (unlikely(fscache_object_is_dead(object))) {
45774- fscache_stat(stat_object_dead);
45775+ fscache_stat_unchecked(stat_object_dead);
45776 return -ENOBUFS;
45777 }
45778 return 0;
45779@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45780
45781 _enter("%p,%p,,,", cookie, page);
45782
45783- fscache_stat(&fscache_n_retrievals);
45784+ fscache_stat_unchecked(&fscache_n_retrievals);
45785
45786 if (hlist_empty(&cookie->backing_objects))
45787 goto nobufs;
45788@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45789 goto nobufs_unlock;
45790 spin_unlock(&cookie->lock);
45791
45792- fscache_stat(&fscache_n_retrieval_ops);
45793+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
45794
45795 /* pin the netfs read context in case we need to do the actual netfs
45796 * read because we've encountered a cache read failure */
45797@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45798
45799 error:
45800 if (ret == -ENOMEM)
45801- fscache_stat(&fscache_n_retrievals_nomem);
45802+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45803 else if (ret == -ERESTARTSYS)
45804- fscache_stat(&fscache_n_retrievals_intr);
45805+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45806 else if (ret == -ENODATA)
45807- fscache_stat(&fscache_n_retrievals_nodata);
45808+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45809 else if (ret < 0)
45810- fscache_stat(&fscache_n_retrievals_nobufs);
45811+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45812 else
45813- fscache_stat(&fscache_n_retrievals_ok);
45814+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
45815
45816 fscache_put_retrieval(op);
45817 _leave(" = %d", ret);
45818@@ -429,7 +429,7 @@ nobufs_unlock:
45819 spin_unlock(&cookie->lock);
45820 kfree(op);
45821 nobufs:
45822- fscache_stat(&fscache_n_retrievals_nobufs);
45823+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45824 _leave(" = -ENOBUFS");
45825 return -ENOBUFS;
45826 }
45827@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45828
45829 _enter("%p,,%d,,,", cookie, *nr_pages);
45830
45831- fscache_stat(&fscache_n_retrievals);
45832+ fscache_stat_unchecked(&fscache_n_retrievals);
45833
45834 if (hlist_empty(&cookie->backing_objects))
45835 goto nobufs;
45836@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45837 goto nobufs_unlock;
45838 spin_unlock(&cookie->lock);
45839
45840- fscache_stat(&fscache_n_retrieval_ops);
45841+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
45842
45843 /* pin the netfs read context in case we need to do the actual netfs
45844 * read because we've encountered a cache read failure */
45845@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45846
45847 error:
45848 if (ret == -ENOMEM)
45849- fscache_stat(&fscache_n_retrievals_nomem);
45850+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45851 else if (ret == -ERESTARTSYS)
45852- fscache_stat(&fscache_n_retrievals_intr);
45853+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45854 else if (ret == -ENODATA)
45855- fscache_stat(&fscache_n_retrievals_nodata);
45856+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45857 else if (ret < 0)
45858- fscache_stat(&fscache_n_retrievals_nobufs);
45859+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45860 else
45861- fscache_stat(&fscache_n_retrievals_ok);
45862+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
45863
45864 fscache_put_retrieval(op);
45865 _leave(" = %d", ret);
45866@@ -545,7 +545,7 @@ nobufs_unlock:
45867 spin_unlock(&cookie->lock);
45868 kfree(op);
45869 nobufs:
45870- fscache_stat(&fscache_n_retrievals_nobufs);
45871+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45872 _leave(" = -ENOBUFS");
45873 return -ENOBUFS;
45874 }
45875@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45876
45877 _enter("%p,%p,,,", cookie, page);
45878
45879- fscache_stat(&fscache_n_allocs);
45880+ fscache_stat_unchecked(&fscache_n_allocs);
45881
45882 if (hlist_empty(&cookie->backing_objects))
45883 goto nobufs;
45884@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45885 goto nobufs_unlock;
45886 spin_unlock(&cookie->lock);
45887
45888- fscache_stat(&fscache_n_alloc_ops);
45889+ fscache_stat_unchecked(&fscache_n_alloc_ops);
45890
45891 ret = fscache_wait_for_retrieval_activation(
45892 object, op,
45893@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45894
45895 error:
45896 if (ret == -ERESTARTSYS)
45897- fscache_stat(&fscache_n_allocs_intr);
45898+ fscache_stat_unchecked(&fscache_n_allocs_intr);
45899 else if (ret < 0)
45900- fscache_stat(&fscache_n_allocs_nobufs);
45901+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45902 else
45903- fscache_stat(&fscache_n_allocs_ok);
45904+ fscache_stat_unchecked(&fscache_n_allocs_ok);
45905
45906 fscache_put_retrieval(op);
45907 _leave(" = %d", ret);
45908@@ -625,7 +625,7 @@ nobufs_unlock:
45909 spin_unlock(&cookie->lock);
45910 kfree(op);
45911 nobufs:
45912- fscache_stat(&fscache_n_allocs_nobufs);
45913+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45914 _leave(" = -ENOBUFS");
45915 return -ENOBUFS;
45916 }
45917@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45918
45919 spin_lock(&cookie->stores_lock);
45920
45921- fscache_stat(&fscache_n_store_calls);
45922+ fscache_stat_unchecked(&fscache_n_store_calls);
45923
45924 /* find a page to store */
45925 page = NULL;
45926@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45927 page = results[0];
45928 _debug("gang %d [%lx]", n, page->index);
45929 if (page->index > op->store_limit) {
45930- fscache_stat(&fscache_n_store_pages_over_limit);
45931+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45932 goto superseded;
45933 }
45934
45935@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45936 spin_unlock(&cookie->stores_lock);
45937 spin_unlock(&object->lock);
45938
45939- fscache_stat(&fscache_n_store_pages);
45940+ fscache_stat_unchecked(&fscache_n_store_pages);
45941 fscache_stat(&fscache_n_cop_write_page);
45942 ret = object->cache->ops->write_page(op, page);
45943 fscache_stat_d(&fscache_n_cop_write_page);
45944@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45945 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45946 ASSERT(PageFsCache(page));
45947
45948- fscache_stat(&fscache_n_stores);
45949+ fscache_stat_unchecked(&fscache_n_stores);
45950
45951 op = kzalloc(sizeof(*op), GFP_NOIO);
45952 if (!op)
45953@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45954 spin_unlock(&cookie->stores_lock);
45955 spin_unlock(&object->lock);
45956
45957- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45958+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45959 op->store_limit = object->store_limit;
45960
45961 if (fscache_submit_op(object, &op->op) < 0)
45962@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45963
45964 spin_unlock(&cookie->lock);
45965 radix_tree_preload_end();
45966- fscache_stat(&fscache_n_store_ops);
45967- fscache_stat(&fscache_n_stores_ok);
45968+ fscache_stat_unchecked(&fscache_n_store_ops);
45969+ fscache_stat_unchecked(&fscache_n_stores_ok);
45970
45971 /* the work queue now carries its own ref on the object */
45972 fscache_put_operation(&op->op);
45973@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45974 return 0;
45975
45976 already_queued:
45977- fscache_stat(&fscache_n_stores_again);
45978+ fscache_stat_unchecked(&fscache_n_stores_again);
45979 already_pending:
45980 spin_unlock(&cookie->stores_lock);
45981 spin_unlock(&object->lock);
45982 spin_unlock(&cookie->lock);
45983 radix_tree_preload_end();
45984 kfree(op);
45985- fscache_stat(&fscache_n_stores_ok);
45986+ fscache_stat_unchecked(&fscache_n_stores_ok);
45987 _leave(" = 0");
45988 return 0;
45989
45990@@ -851,14 +851,14 @@ nobufs:
45991 spin_unlock(&cookie->lock);
45992 radix_tree_preload_end();
45993 kfree(op);
45994- fscache_stat(&fscache_n_stores_nobufs);
45995+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
45996 _leave(" = -ENOBUFS");
45997 return -ENOBUFS;
45998
45999 nomem_free:
46000 kfree(op);
46001 nomem:
46002- fscache_stat(&fscache_n_stores_oom);
46003+ fscache_stat_unchecked(&fscache_n_stores_oom);
46004 _leave(" = -ENOMEM");
46005 return -ENOMEM;
46006 }
46007@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
46008 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46009 ASSERTCMP(page, !=, NULL);
46010
46011- fscache_stat(&fscache_n_uncaches);
46012+ fscache_stat_unchecked(&fscache_n_uncaches);
46013
46014 /* cache withdrawal may beat us to it */
46015 if (!PageFsCache(page))
46016@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
46017 unsigned long loop;
46018
46019 #ifdef CONFIG_FSCACHE_STATS
46020- atomic_add(pagevec->nr, &fscache_n_marks);
46021+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
46022 #endif
46023
46024 for (loop = 0; loop < pagevec->nr; loop++) {
46025diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
46026index 4765190..2a067f2 100644
46027--- a/fs/fscache/stats.c
46028+++ b/fs/fscache/stats.c
46029@@ -18,95 +18,95 @@
46030 /*
46031 * operation counters
46032 */
46033-atomic_t fscache_n_op_pend;
46034-atomic_t fscache_n_op_run;
46035-atomic_t fscache_n_op_enqueue;
46036-atomic_t fscache_n_op_requeue;
46037-atomic_t fscache_n_op_deferred_release;
46038-atomic_t fscache_n_op_release;
46039-atomic_t fscache_n_op_gc;
46040-atomic_t fscache_n_op_cancelled;
46041-atomic_t fscache_n_op_rejected;
46042+atomic_unchecked_t fscache_n_op_pend;
46043+atomic_unchecked_t fscache_n_op_run;
46044+atomic_unchecked_t fscache_n_op_enqueue;
46045+atomic_unchecked_t fscache_n_op_requeue;
46046+atomic_unchecked_t fscache_n_op_deferred_release;
46047+atomic_unchecked_t fscache_n_op_release;
46048+atomic_unchecked_t fscache_n_op_gc;
46049+atomic_unchecked_t fscache_n_op_cancelled;
46050+atomic_unchecked_t fscache_n_op_rejected;
46051
46052-atomic_t fscache_n_attr_changed;
46053-atomic_t fscache_n_attr_changed_ok;
46054-atomic_t fscache_n_attr_changed_nobufs;
46055-atomic_t fscache_n_attr_changed_nomem;
46056-atomic_t fscache_n_attr_changed_calls;
46057+atomic_unchecked_t fscache_n_attr_changed;
46058+atomic_unchecked_t fscache_n_attr_changed_ok;
46059+atomic_unchecked_t fscache_n_attr_changed_nobufs;
46060+atomic_unchecked_t fscache_n_attr_changed_nomem;
46061+atomic_unchecked_t fscache_n_attr_changed_calls;
46062
46063-atomic_t fscache_n_allocs;
46064-atomic_t fscache_n_allocs_ok;
46065-atomic_t fscache_n_allocs_wait;
46066-atomic_t fscache_n_allocs_nobufs;
46067-atomic_t fscache_n_allocs_intr;
46068-atomic_t fscache_n_allocs_object_dead;
46069-atomic_t fscache_n_alloc_ops;
46070-atomic_t fscache_n_alloc_op_waits;
46071+atomic_unchecked_t fscache_n_allocs;
46072+atomic_unchecked_t fscache_n_allocs_ok;
46073+atomic_unchecked_t fscache_n_allocs_wait;
46074+atomic_unchecked_t fscache_n_allocs_nobufs;
46075+atomic_unchecked_t fscache_n_allocs_intr;
46076+atomic_unchecked_t fscache_n_allocs_object_dead;
46077+atomic_unchecked_t fscache_n_alloc_ops;
46078+atomic_unchecked_t fscache_n_alloc_op_waits;
46079
46080-atomic_t fscache_n_retrievals;
46081-atomic_t fscache_n_retrievals_ok;
46082-atomic_t fscache_n_retrievals_wait;
46083-atomic_t fscache_n_retrievals_nodata;
46084-atomic_t fscache_n_retrievals_nobufs;
46085-atomic_t fscache_n_retrievals_intr;
46086-atomic_t fscache_n_retrievals_nomem;
46087-atomic_t fscache_n_retrievals_object_dead;
46088-atomic_t fscache_n_retrieval_ops;
46089-atomic_t fscache_n_retrieval_op_waits;
46090+atomic_unchecked_t fscache_n_retrievals;
46091+atomic_unchecked_t fscache_n_retrievals_ok;
46092+atomic_unchecked_t fscache_n_retrievals_wait;
46093+atomic_unchecked_t fscache_n_retrievals_nodata;
46094+atomic_unchecked_t fscache_n_retrievals_nobufs;
46095+atomic_unchecked_t fscache_n_retrievals_intr;
46096+atomic_unchecked_t fscache_n_retrievals_nomem;
46097+atomic_unchecked_t fscache_n_retrievals_object_dead;
46098+atomic_unchecked_t fscache_n_retrieval_ops;
46099+atomic_unchecked_t fscache_n_retrieval_op_waits;
46100
46101-atomic_t fscache_n_stores;
46102-atomic_t fscache_n_stores_ok;
46103-atomic_t fscache_n_stores_again;
46104-atomic_t fscache_n_stores_nobufs;
46105-atomic_t fscache_n_stores_oom;
46106-atomic_t fscache_n_store_ops;
46107-atomic_t fscache_n_store_calls;
46108-atomic_t fscache_n_store_pages;
46109-atomic_t fscache_n_store_radix_deletes;
46110-atomic_t fscache_n_store_pages_over_limit;
46111+atomic_unchecked_t fscache_n_stores;
46112+atomic_unchecked_t fscache_n_stores_ok;
46113+atomic_unchecked_t fscache_n_stores_again;
46114+atomic_unchecked_t fscache_n_stores_nobufs;
46115+atomic_unchecked_t fscache_n_stores_oom;
46116+atomic_unchecked_t fscache_n_store_ops;
46117+atomic_unchecked_t fscache_n_store_calls;
46118+atomic_unchecked_t fscache_n_store_pages;
46119+atomic_unchecked_t fscache_n_store_radix_deletes;
46120+atomic_unchecked_t fscache_n_store_pages_over_limit;
46121
46122-atomic_t fscache_n_store_vmscan_not_storing;
46123-atomic_t fscache_n_store_vmscan_gone;
46124-atomic_t fscache_n_store_vmscan_busy;
46125-atomic_t fscache_n_store_vmscan_cancelled;
46126+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46127+atomic_unchecked_t fscache_n_store_vmscan_gone;
46128+atomic_unchecked_t fscache_n_store_vmscan_busy;
46129+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46130
46131-atomic_t fscache_n_marks;
46132-atomic_t fscache_n_uncaches;
46133+atomic_unchecked_t fscache_n_marks;
46134+atomic_unchecked_t fscache_n_uncaches;
46135
46136-atomic_t fscache_n_acquires;
46137-atomic_t fscache_n_acquires_null;
46138-atomic_t fscache_n_acquires_no_cache;
46139-atomic_t fscache_n_acquires_ok;
46140-atomic_t fscache_n_acquires_nobufs;
46141-atomic_t fscache_n_acquires_oom;
46142+atomic_unchecked_t fscache_n_acquires;
46143+atomic_unchecked_t fscache_n_acquires_null;
46144+atomic_unchecked_t fscache_n_acquires_no_cache;
46145+atomic_unchecked_t fscache_n_acquires_ok;
46146+atomic_unchecked_t fscache_n_acquires_nobufs;
46147+atomic_unchecked_t fscache_n_acquires_oom;
46148
46149-atomic_t fscache_n_updates;
46150-atomic_t fscache_n_updates_null;
46151-atomic_t fscache_n_updates_run;
46152+atomic_unchecked_t fscache_n_updates;
46153+atomic_unchecked_t fscache_n_updates_null;
46154+atomic_unchecked_t fscache_n_updates_run;
46155
46156-atomic_t fscache_n_relinquishes;
46157-atomic_t fscache_n_relinquishes_null;
46158-atomic_t fscache_n_relinquishes_waitcrt;
46159-atomic_t fscache_n_relinquishes_retire;
46160+atomic_unchecked_t fscache_n_relinquishes;
46161+atomic_unchecked_t fscache_n_relinquishes_null;
46162+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46163+atomic_unchecked_t fscache_n_relinquishes_retire;
46164
46165-atomic_t fscache_n_cookie_index;
46166-atomic_t fscache_n_cookie_data;
46167-atomic_t fscache_n_cookie_special;
46168+atomic_unchecked_t fscache_n_cookie_index;
46169+atomic_unchecked_t fscache_n_cookie_data;
46170+atomic_unchecked_t fscache_n_cookie_special;
46171
46172-atomic_t fscache_n_object_alloc;
46173-atomic_t fscache_n_object_no_alloc;
46174-atomic_t fscache_n_object_lookups;
46175-atomic_t fscache_n_object_lookups_negative;
46176-atomic_t fscache_n_object_lookups_positive;
46177-atomic_t fscache_n_object_lookups_timed_out;
46178-atomic_t fscache_n_object_created;
46179-atomic_t fscache_n_object_avail;
46180-atomic_t fscache_n_object_dead;
46181+atomic_unchecked_t fscache_n_object_alloc;
46182+atomic_unchecked_t fscache_n_object_no_alloc;
46183+atomic_unchecked_t fscache_n_object_lookups;
46184+atomic_unchecked_t fscache_n_object_lookups_negative;
46185+atomic_unchecked_t fscache_n_object_lookups_positive;
46186+atomic_unchecked_t fscache_n_object_lookups_timed_out;
46187+atomic_unchecked_t fscache_n_object_created;
46188+atomic_unchecked_t fscache_n_object_avail;
46189+atomic_unchecked_t fscache_n_object_dead;
46190
46191-atomic_t fscache_n_checkaux_none;
46192-atomic_t fscache_n_checkaux_okay;
46193-atomic_t fscache_n_checkaux_update;
46194-atomic_t fscache_n_checkaux_obsolete;
46195+atomic_unchecked_t fscache_n_checkaux_none;
46196+atomic_unchecked_t fscache_n_checkaux_okay;
46197+atomic_unchecked_t fscache_n_checkaux_update;
46198+atomic_unchecked_t fscache_n_checkaux_obsolete;
46199
46200 atomic_t fscache_n_cop_alloc_object;
46201 atomic_t fscache_n_cop_lookup_object;
46202@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
46203 seq_puts(m, "FS-Cache statistics\n");
46204
46205 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
46206- atomic_read(&fscache_n_cookie_index),
46207- atomic_read(&fscache_n_cookie_data),
46208- atomic_read(&fscache_n_cookie_special));
46209+ atomic_read_unchecked(&fscache_n_cookie_index),
46210+ atomic_read_unchecked(&fscache_n_cookie_data),
46211+ atomic_read_unchecked(&fscache_n_cookie_special));
46212
46213 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
46214- atomic_read(&fscache_n_object_alloc),
46215- atomic_read(&fscache_n_object_no_alloc),
46216- atomic_read(&fscache_n_object_avail),
46217- atomic_read(&fscache_n_object_dead));
46218+ atomic_read_unchecked(&fscache_n_object_alloc),
46219+ atomic_read_unchecked(&fscache_n_object_no_alloc),
46220+ atomic_read_unchecked(&fscache_n_object_avail),
46221+ atomic_read_unchecked(&fscache_n_object_dead));
46222 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
46223- atomic_read(&fscache_n_checkaux_none),
46224- atomic_read(&fscache_n_checkaux_okay),
46225- atomic_read(&fscache_n_checkaux_update),
46226- atomic_read(&fscache_n_checkaux_obsolete));
46227+ atomic_read_unchecked(&fscache_n_checkaux_none),
46228+ atomic_read_unchecked(&fscache_n_checkaux_okay),
46229+ atomic_read_unchecked(&fscache_n_checkaux_update),
46230+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
46231
46232 seq_printf(m, "Pages : mrk=%u unc=%u\n",
46233- atomic_read(&fscache_n_marks),
46234- atomic_read(&fscache_n_uncaches));
46235+ atomic_read_unchecked(&fscache_n_marks),
46236+ atomic_read_unchecked(&fscache_n_uncaches));
46237
46238 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
46239 " oom=%u\n",
46240- atomic_read(&fscache_n_acquires),
46241- atomic_read(&fscache_n_acquires_null),
46242- atomic_read(&fscache_n_acquires_no_cache),
46243- atomic_read(&fscache_n_acquires_ok),
46244- atomic_read(&fscache_n_acquires_nobufs),
46245- atomic_read(&fscache_n_acquires_oom));
46246+ atomic_read_unchecked(&fscache_n_acquires),
46247+ atomic_read_unchecked(&fscache_n_acquires_null),
46248+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
46249+ atomic_read_unchecked(&fscache_n_acquires_ok),
46250+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
46251+ atomic_read_unchecked(&fscache_n_acquires_oom));
46252
46253 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
46254- atomic_read(&fscache_n_object_lookups),
46255- atomic_read(&fscache_n_object_lookups_negative),
46256- atomic_read(&fscache_n_object_lookups_positive),
46257- atomic_read(&fscache_n_object_created),
46258- atomic_read(&fscache_n_object_lookups_timed_out));
46259+ atomic_read_unchecked(&fscache_n_object_lookups),
46260+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
46261+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
46262+ atomic_read_unchecked(&fscache_n_object_created),
46263+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
46264
46265 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
46266- atomic_read(&fscache_n_updates),
46267- atomic_read(&fscache_n_updates_null),
46268- atomic_read(&fscache_n_updates_run));
46269+ atomic_read_unchecked(&fscache_n_updates),
46270+ atomic_read_unchecked(&fscache_n_updates_null),
46271+ atomic_read_unchecked(&fscache_n_updates_run));
46272
46273 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
46274- atomic_read(&fscache_n_relinquishes),
46275- atomic_read(&fscache_n_relinquishes_null),
46276- atomic_read(&fscache_n_relinquishes_waitcrt),
46277- atomic_read(&fscache_n_relinquishes_retire));
46278+ atomic_read_unchecked(&fscache_n_relinquishes),
46279+ atomic_read_unchecked(&fscache_n_relinquishes_null),
46280+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
46281+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
46282
46283 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
46284- atomic_read(&fscache_n_attr_changed),
46285- atomic_read(&fscache_n_attr_changed_ok),
46286- atomic_read(&fscache_n_attr_changed_nobufs),
46287- atomic_read(&fscache_n_attr_changed_nomem),
46288- atomic_read(&fscache_n_attr_changed_calls));
46289+ atomic_read_unchecked(&fscache_n_attr_changed),
46290+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
46291+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
46292+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
46293+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
46294
46295 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
46296- atomic_read(&fscache_n_allocs),
46297- atomic_read(&fscache_n_allocs_ok),
46298- atomic_read(&fscache_n_allocs_wait),
46299- atomic_read(&fscache_n_allocs_nobufs),
46300- atomic_read(&fscache_n_allocs_intr));
46301+ atomic_read_unchecked(&fscache_n_allocs),
46302+ atomic_read_unchecked(&fscache_n_allocs_ok),
46303+ atomic_read_unchecked(&fscache_n_allocs_wait),
46304+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
46305+ atomic_read_unchecked(&fscache_n_allocs_intr));
46306 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
46307- atomic_read(&fscache_n_alloc_ops),
46308- atomic_read(&fscache_n_alloc_op_waits),
46309- atomic_read(&fscache_n_allocs_object_dead));
46310+ atomic_read_unchecked(&fscache_n_alloc_ops),
46311+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
46312+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
46313
46314 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
46315 " int=%u oom=%u\n",
46316- atomic_read(&fscache_n_retrievals),
46317- atomic_read(&fscache_n_retrievals_ok),
46318- atomic_read(&fscache_n_retrievals_wait),
46319- atomic_read(&fscache_n_retrievals_nodata),
46320- atomic_read(&fscache_n_retrievals_nobufs),
46321- atomic_read(&fscache_n_retrievals_intr),
46322- atomic_read(&fscache_n_retrievals_nomem));
46323+ atomic_read_unchecked(&fscache_n_retrievals),
46324+ atomic_read_unchecked(&fscache_n_retrievals_ok),
46325+ atomic_read_unchecked(&fscache_n_retrievals_wait),
46326+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
46327+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
46328+ atomic_read_unchecked(&fscache_n_retrievals_intr),
46329+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
46330 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
46331- atomic_read(&fscache_n_retrieval_ops),
46332- atomic_read(&fscache_n_retrieval_op_waits),
46333- atomic_read(&fscache_n_retrievals_object_dead));
46334+ atomic_read_unchecked(&fscache_n_retrieval_ops),
46335+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
46336+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
46337
46338 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
46339- atomic_read(&fscache_n_stores),
46340- atomic_read(&fscache_n_stores_ok),
46341- atomic_read(&fscache_n_stores_again),
46342- atomic_read(&fscache_n_stores_nobufs),
46343- atomic_read(&fscache_n_stores_oom));
46344+ atomic_read_unchecked(&fscache_n_stores),
46345+ atomic_read_unchecked(&fscache_n_stores_ok),
46346+ atomic_read_unchecked(&fscache_n_stores_again),
46347+ atomic_read_unchecked(&fscache_n_stores_nobufs),
46348+ atomic_read_unchecked(&fscache_n_stores_oom));
46349 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
46350- atomic_read(&fscache_n_store_ops),
46351- atomic_read(&fscache_n_store_calls),
46352- atomic_read(&fscache_n_store_pages),
46353- atomic_read(&fscache_n_store_radix_deletes),
46354- atomic_read(&fscache_n_store_pages_over_limit));
46355+ atomic_read_unchecked(&fscache_n_store_ops),
46356+ atomic_read_unchecked(&fscache_n_store_calls),
46357+ atomic_read_unchecked(&fscache_n_store_pages),
46358+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
46359+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
46360
46361 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
46362- atomic_read(&fscache_n_store_vmscan_not_storing),
46363- atomic_read(&fscache_n_store_vmscan_gone),
46364- atomic_read(&fscache_n_store_vmscan_busy),
46365- atomic_read(&fscache_n_store_vmscan_cancelled));
46366+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
46367+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
46368+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
46369+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
46370
46371 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
46372- atomic_read(&fscache_n_op_pend),
46373- atomic_read(&fscache_n_op_run),
46374- atomic_read(&fscache_n_op_enqueue),
46375- atomic_read(&fscache_n_op_cancelled),
46376- atomic_read(&fscache_n_op_rejected));
46377+ atomic_read_unchecked(&fscache_n_op_pend),
46378+ atomic_read_unchecked(&fscache_n_op_run),
46379+ atomic_read_unchecked(&fscache_n_op_enqueue),
46380+ atomic_read_unchecked(&fscache_n_op_cancelled),
46381+ atomic_read_unchecked(&fscache_n_op_rejected));
46382 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
46383- atomic_read(&fscache_n_op_deferred_release),
46384- atomic_read(&fscache_n_op_release),
46385- atomic_read(&fscache_n_op_gc));
46386+ atomic_read_unchecked(&fscache_n_op_deferred_release),
46387+ atomic_read_unchecked(&fscache_n_op_release),
46388+ atomic_read_unchecked(&fscache_n_op_gc));
46389
46390 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
46391 atomic_read(&fscache_n_cop_alloc_object),
46392diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
46393index 3426521..3b75162 100644
46394--- a/fs/fuse/cuse.c
46395+++ b/fs/fuse/cuse.c
46396@@ -587,10 +587,12 @@ static int __init cuse_init(void)
46397 INIT_LIST_HEAD(&cuse_conntbl[i]);
46398
46399 /* inherit and extend fuse_dev_operations */
46400- cuse_channel_fops = fuse_dev_operations;
46401- cuse_channel_fops.owner = THIS_MODULE;
46402- cuse_channel_fops.open = cuse_channel_open;
46403- cuse_channel_fops.release = cuse_channel_release;
46404+ pax_open_kernel();
46405+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
46406+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
46407+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
46408+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
46409+ pax_close_kernel();
46410
46411 cuse_class = class_create(THIS_MODULE, "cuse");
46412 if (IS_ERR(cuse_class))
46413diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
46414index 5f3368a..8306426 100644
46415--- a/fs/fuse/dev.c
46416+++ b/fs/fuse/dev.c
46417@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
46418 ret = 0;
46419 pipe_lock(pipe);
46420
46421- if (!pipe->readers) {
46422+ if (!atomic_read(&pipe->readers)) {
46423 send_sig(SIGPIPE, current, 0);
46424 if (!ret)
46425 ret = -EPIPE;
46426diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
46427index 2066328..f5add3b 100644
46428--- a/fs/fuse/dir.c
46429+++ b/fs/fuse/dir.c
46430@@ -1175,7 +1175,7 @@ static char *read_link(struct dentry *dentry)
46431 return link;
46432 }
46433
46434-static void free_link(char *link)
46435+static void free_link(const char *link)
46436 {
46437 if (!IS_ERR(link))
46438 free_page((unsigned long) link);
46439diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
46440index 5698746..6086012 100644
46441--- a/fs/gfs2/inode.c
46442+++ b/fs/gfs2/inode.c
46443@@ -1487,7 +1487,7 @@ out:
46444
46445 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46446 {
46447- char *s = nd_get_link(nd);
46448+ const char *s = nd_get_link(nd);
46449 if (!IS_ERR(s))
46450 kfree(s);
46451 }
46452diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
46453index 3645cd3..786809c 100644
46454--- a/fs/hugetlbfs/inode.c
46455+++ b/fs/hugetlbfs/inode.c
46456@@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs_fs_type = {
46457 .kill_sb = kill_litter_super,
46458 };
46459
46460-static struct vfsmount *hugetlbfs_vfsmount;
46461+struct vfsmount *hugetlbfs_vfsmount;
46462
46463 static int can_do_hugetlb_shm(void)
46464 {
46465diff --git a/fs/inode.c b/fs/inode.c
46466index 83ab215..8842101 100644
46467--- a/fs/inode.c
46468+++ b/fs/inode.c
46469@@ -870,8 +870,8 @@ unsigned int get_next_ino(void)
46470
46471 #ifdef CONFIG_SMP
46472 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46473- static atomic_t shared_last_ino;
46474- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46475+ static atomic_unchecked_t shared_last_ino;
46476+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
46477
46478 res = next - LAST_INO_BATCH;
46479 }
46480diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
46481index eafb8d3..f423d37 100644
46482--- a/fs/jffs2/erase.c
46483+++ b/fs/jffs2/erase.c
46484@@ -438,7 +438,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46485 struct jffs2_unknown_node marker = {
46486 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46487 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46488- .totlen = cpu_to_je32(c->cleanmarker_size)
46489+ .totlen = cpu_to_je32(c->cleanmarker_size),
46490+ .hdr_crc = cpu_to_je32(0)
46491 };
46492
46493 jffs2_prealloc_raw_node_refs(c, jeb, 1);
46494diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
46495index 30e8f47..21f600c 100644
46496--- a/fs/jffs2/wbuf.c
46497+++ b/fs/jffs2/wbuf.c
46498@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46499 {
46500 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46501 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46502- .totlen = constant_cpu_to_je32(8)
46503+ .totlen = constant_cpu_to_je32(8),
46504+ .hdr_crc = constant_cpu_to_je32(0)
46505 };
46506
46507 /*
46508diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46509index 682bca6..86b8e6e 100644
46510--- a/fs/jfs/super.c
46511+++ b/fs/jfs/super.c
46512@@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
46513
46514 jfs_inode_cachep =
46515 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46516- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46517+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46518 init_once);
46519 if (jfs_inode_cachep == NULL)
46520 return -ENOMEM;
46521diff --git a/fs/libfs.c b/fs/libfs.c
46522index 5b2dbb3..7442d54 100644
46523--- a/fs/libfs.c
46524+++ b/fs/libfs.c
46525@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46526
46527 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46528 struct dentry *next;
46529+ char d_name[sizeof(next->d_iname)];
46530+ const unsigned char *name;
46531+
46532 next = list_entry(p, struct dentry, d_u.d_child);
46533 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46534 if (!simple_positive(next)) {
46535@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46536
46537 spin_unlock(&next->d_lock);
46538 spin_unlock(&dentry->d_lock);
46539- if (filldir(dirent, next->d_name.name,
46540+ name = next->d_name.name;
46541+ if (name == next->d_iname) {
46542+ memcpy(d_name, name, next->d_name.len);
46543+ name = d_name;
46544+ }
46545+ if (filldir(dirent, name,
46546 next->d_name.len, filp->f_pos,
46547 next->d_inode->i_ino,
46548 dt_type(next->d_inode)) < 0)
46549diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46550index 8392cb8..80d6193 100644
46551--- a/fs/lockd/clntproc.c
46552+++ b/fs/lockd/clntproc.c
46553@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46554 /*
46555 * Cookie counter for NLM requests
46556 */
46557-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46558+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46559
46560 void nlmclnt_next_cookie(struct nlm_cookie *c)
46561 {
46562- u32 cookie = atomic_inc_return(&nlm_cookie);
46563+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46564
46565 memcpy(c->data, &cookie, 4);
46566 c->len=4;
46567diff --git a/fs/locks.c b/fs/locks.c
46568index 637694b..f84a121 100644
46569--- a/fs/locks.c
46570+++ b/fs/locks.c
46571@@ -2074,16 +2074,16 @@ void locks_remove_flock(struct file *filp)
46572 return;
46573
46574 if (filp->f_op && filp->f_op->flock) {
46575- struct file_lock fl = {
46576+ struct file_lock flock = {
46577 .fl_pid = current->tgid,
46578 .fl_file = filp,
46579 .fl_flags = FL_FLOCK,
46580 .fl_type = F_UNLCK,
46581 .fl_end = OFFSET_MAX,
46582 };
46583- filp->f_op->flock(filp, F_SETLKW, &fl);
46584- if (fl.fl_ops && fl.fl_ops->fl_release_private)
46585- fl.fl_ops->fl_release_private(&fl);
46586+ filp->f_op->flock(filp, F_SETLKW, &flock);
46587+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
46588+ flock.fl_ops->fl_release_private(&flock);
46589 }
46590
46591 lock_flocks();
46592diff --git a/fs/namei.c b/fs/namei.c
46593index 46ea9cc..c7cf3a3 100644
46594--- a/fs/namei.c
46595+++ b/fs/namei.c
46596@@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
46597 if (ret != -EACCES)
46598 return ret;
46599
46600+#ifdef CONFIG_GRKERNSEC
46601+ /* we'll block if we have to log due to a denied capability use */
46602+ if (mask & MAY_NOT_BLOCK)
46603+ return -ECHILD;
46604+#endif
46605+
46606 if (S_ISDIR(inode->i_mode)) {
46607 /* DACs are overridable for directories */
46608- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46609- return 0;
46610 if (!(mask & MAY_WRITE))
46611- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46612+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46613+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46614 return 0;
46615+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46616+ return 0;
46617 return -EACCES;
46618 }
46619 /*
46620+ * Searching includes executable on directories, else just read.
46621+ */
46622+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46623+ if (mask == MAY_READ)
46624+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46625+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46626+ return 0;
46627+
46628+ /*
46629 * Read/write DACs are always overridable.
46630 * Executable DACs are overridable when there is
46631 * at least one exec bit set.
46632@@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
46633 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46634 return 0;
46635
46636- /*
46637- * Searching includes executable on directories, else just read.
46638- */
46639- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46640- if (mask == MAY_READ)
46641- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46642- return 0;
46643-
46644 return -EACCES;
46645 }
46646
46647@@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46648 return error;
46649 }
46650
46651+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
46652+ dentry->d_inode, dentry, nd->path.mnt)) {
46653+ error = -EACCES;
46654+ *p = ERR_PTR(error); /* no ->put_link(), please */
46655+ path_put(&nd->path);
46656+ return error;
46657+ }
46658+
46659 nd->last_type = LAST_BIND;
46660 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46661 error = PTR_ERR(*p);
46662 if (!IS_ERR(*p)) {
46663- char *s = nd_get_link(nd);
46664+ const char *s = nd_get_link(nd);
46665 error = 0;
46666 if (s)
46667 error = __vfs_follow_link(nd, s);
46668@@ -1650,6 +1666,21 @@ static int path_lookupat(int dfd, const char *name,
46669 if (!err)
46670 err = complete_walk(nd);
46671
46672+ if (!(nd->flags & LOOKUP_PARENT)) {
46673+#ifdef CONFIG_GRKERNSEC
46674+ if (flags & LOOKUP_RCU) {
46675+ if (!err)
46676+ path_put(&nd->path);
46677+ err = -ECHILD;
46678+ } else
46679+#endif
46680+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46681+ if (!err)
46682+ path_put(&nd->path);
46683+ err = -ENOENT;
46684+ }
46685+ }
46686+
46687 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46688 if (!nd->inode->i_op->lookup) {
46689 path_put(&nd->path);
46690@@ -1677,6 +1708,15 @@ static int do_path_lookup(int dfd, const char *name,
46691 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46692
46693 if (likely(!retval)) {
46694+ if (*name != '/' && nd->path.dentry && nd->inode) {
46695+#ifdef CONFIG_GRKERNSEC
46696+ if (flags & LOOKUP_RCU)
46697+ return -ECHILD;
46698+#endif
46699+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46700+ return -ENOENT;
46701+ }
46702+
46703 if (unlikely(!audit_dummy_context())) {
46704 if (nd->path.dentry && nd->inode)
46705 audit_inode(name, nd->path.dentry);
46706@@ -2071,6 +2111,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
46707 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
46708 return -EPERM;
46709
46710+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
46711+ return -EPERM;
46712+ if (gr_handle_rawio(inode))
46713+ return -EPERM;
46714+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
46715+ return -EACCES;
46716+
46717 return 0;
46718 }
46719
46720@@ -2132,6 +2179,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46721 error = complete_walk(nd);
46722 if (error)
46723 return ERR_PTR(error);
46724+#ifdef CONFIG_GRKERNSEC
46725+ if (nd->flags & LOOKUP_RCU) {
46726+ error = -ECHILD;
46727+ goto exit;
46728+ }
46729+#endif
46730+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46731+ error = -ENOENT;
46732+ goto exit;
46733+ }
46734 audit_inode(pathname, nd->path.dentry);
46735 if (open_flag & O_CREAT) {
46736 error = -EISDIR;
46737@@ -2142,6 +2199,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46738 error = complete_walk(nd);
46739 if (error)
46740 return ERR_PTR(error);
46741+#ifdef CONFIG_GRKERNSEC
46742+ if (nd->flags & LOOKUP_RCU) {
46743+ error = -ECHILD;
46744+ goto exit;
46745+ }
46746+#endif
46747+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46748+ error = -ENOENT;
46749+ goto exit;
46750+ }
46751 audit_inode(pathname, dir);
46752 goto ok;
46753 }
46754@@ -2163,6 +2230,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46755 error = complete_walk(nd);
46756 if (error)
46757 return ERR_PTR(error);
46758+#ifdef CONFIG_GRKERNSEC
46759+ if (nd->flags & LOOKUP_RCU) {
46760+ error = -ECHILD;
46761+ goto exit;
46762+ }
46763+#endif
46764+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46765+ error = -ENOENT;
46766+ goto exit;
46767+ }
46768
46769 error = -ENOTDIR;
46770 if (nd->flags & LOOKUP_DIRECTORY) {
46771@@ -2203,6 +2280,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46772 /* Negative dentry, just create the file */
46773 if (!dentry->d_inode) {
46774 umode_t mode = op->mode;
46775+
46776+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46777+ error = -EACCES;
46778+ goto exit_mutex_unlock;
46779+ }
46780+
46781 if (!IS_POSIXACL(dir->d_inode))
46782 mode &= ~current_umask();
46783 /*
46784@@ -2226,6 +2309,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46785 error = vfs_create(dir->d_inode, dentry, mode, nd);
46786 if (error)
46787 goto exit_mutex_unlock;
46788+ else
46789+ gr_handle_create(path->dentry, path->mnt);
46790 mutex_unlock(&dir->d_inode->i_mutex);
46791 dput(nd->path.dentry);
46792 nd->path.dentry = dentry;
46793@@ -2235,6 +2320,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46794 /*
46795 * It already exists.
46796 */
46797+
46798+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46799+ error = -ENOENT;
46800+ goto exit_mutex_unlock;
46801+ }
46802+
46803+ /* only check if O_CREAT is specified, all other checks need to go
46804+ into may_open */
46805+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46806+ error = -EACCES;
46807+ goto exit_mutex_unlock;
46808+ }
46809+
46810 mutex_unlock(&dir->d_inode->i_mutex);
46811 audit_inode(pathname, path->dentry);
46812
46813@@ -2447,6 +2545,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46814 *path = nd.path;
46815 return dentry;
46816 eexist:
46817+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46818+ dput(dentry);
46819+ dentry = ERR_PTR(-ENOENT);
46820+ goto fail;
46821+ }
46822 dput(dentry);
46823 dentry = ERR_PTR(-EEXIST);
46824 fail:
46825@@ -2469,6 +2572,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46826 }
46827 EXPORT_SYMBOL(user_path_create);
46828
46829+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46830+{
46831+ char *tmp = getname(pathname);
46832+ struct dentry *res;
46833+ if (IS_ERR(tmp))
46834+ return ERR_CAST(tmp);
46835+ res = kern_path_create(dfd, tmp, path, is_dir);
46836+ if (IS_ERR(res))
46837+ putname(tmp);
46838+ else
46839+ *to = tmp;
46840+ return res;
46841+}
46842+
46843 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
46844 {
46845 int error = may_create(dir, dentry);
46846@@ -2536,6 +2653,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46847 error = mnt_want_write(path.mnt);
46848 if (error)
46849 goto out_dput;
46850+
46851+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46852+ error = -EPERM;
46853+ goto out_drop_write;
46854+ }
46855+
46856+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46857+ error = -EACCES;
46858+ goto out_drop_write;
46859+ }
46860+
46861 error = security_path_mknod(&path, dentry, mode, dev);
46862 if (error)
46863 goto out_drop_write;
46864@@ -2553,6 +2681,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46865 }
46866 out_drop_write:
46867 mnt_drop_write(path.mnt);
46868+
46869+ if (!error)
46870+ gr_handle_create(dentry, path.mnt);
46871 out_dput:
46872 dput(dentry);
46873 mutex_unlock(&path.dentry->d_inode->i_mutex);
46874@@ -2602,12 +2733,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
46875 error = mnt_want_write(path.mnt);
46876 if (error)
46877 goto out_dput;
46878+
46879+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46880+ error = -EACCES;
46881+ goto out_drop_write;
46882+ }
46883+
46884 error = security_path_mkdir(&path, dentry, mode);
46885 if (error)
46886 goto out_drop_write;
46887 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46888 out_drop_write:
46889 mnt_drop_write(path.mnt);
46890+
46891+ if (!error)
46892+ gr_handle_create(dentry, path.mnt);
46893 out_dput:
46894 dput(dentry);
46895 mutex_unlock(&path.dentry->d_inode->i_mutex);
46896@@ -2687,6 +2827,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46897 char * name;
46898 struct dentry *dentry;
46899 struct nameidata nd;
46900+ ino_t saved_ino = 0;
46901+ dev_t saved_dev = 0;
46902
46903 error = user_path_parent(dfd, pathname, &nd, &name);
46904 if (error)
46905@@ -2715,6 +2857,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46906 error = -ENOENT;
46907 goto exit3;
46908 }
46909+
46910+ saved_ino = dentry->d_inode->i_ino;
46911+ saved_dev = gr_get_dev_from_dentry(dentry);
46912+
46913+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46914+ error = -EACCES;
46915+ goto exit3;
46916+ }
46917+
46918 error = mnt_want_write(nd.path.mnt);
46919 if (error)
46920 goto exit3;
46921@@ -2722,6 +2873,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46922 if (error)
46923 goto exit4;
46924 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46925+ if (!error && (saved_dev || saved_ino))
46926+ gr_handle_delete(saved_ino, saved_dev);
46927 exit4:
46928 mnt_drop_write(nd.path.mnt);
46929 exit3:
46930@@ -2784,6 +2937,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46931 struct dentry *dentry;
46932 struct nameidata nd;
46933 struct inode *inode = NULL;
46934+ ino_t saved_ino = 0;
46935+ dev_t saved_dev = 0;
46936
46937 error = user_path_parent(dfd, pathname, &nd, &name);
46938 if (error)
46939@@ -2806,6 +2961,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46940 if (!inode)
46941 goto slashes;
46942 ihold(inode);
46943+
46944+ if (inode->i_nlink <= 1) {
46945+ saved_ino = inode->i_ino;
46946+ saved_dev = gr_get_dev_from_dentry(dentry);
46947+ }
46948+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46949+ error = -EACCES;
46950+ goto exit2;
46951+ }
46952+
46953 error = mnt_want_write(nd.path.mnt);
46954 if (error)
46955 goto exit2;
46956@@ -2813,6 +2978,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46957 if (error)
46958 goto exit3;
46959 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46960+ if (!error && (saved_ino || saved_dev))
46961+ gr_handle_delete(saved_ino, saved_dev);
46962 exit3:
46963 mnt_drop_write(nd.path.mnt);
46964 exit2:
46965@@ -2888,10 +3055,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46966 error = mnt_want_write(path.mnt);
46967 if (error)
46968 goto out_dput;
46969+
46970+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46971+ error = -EACCES;
46972+ goto out_drop_write;
46973+ }
46974+
46975 error = security_path_symlink(&path, dentry, from);
46976 if (error)
46977 goto out_drop_write;
46978 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46979+ if (!error)
46980+ gr_handle_create(dentry, path.mnt);
46981 out_drop_write:
46982 mnt_drop_write(path.mnt);
46983 out_dput:
46984@@ -2963,6 +3138,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46985 {
46986 struct dentry *new_dentry;
46987 struct path old_path, new_path;
46988+ char *to = NULL;
46989 int how = 0;
46990 int error;
46991
46992@@ -2986,7 +3162,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46993 if (error)
46994 return error;
46995
46996- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46997+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46998 error = PTR_ERR(new_dentry);
46999 if (IS_ERR(new_dentry))
47000 goto out;
47001@@ -2997,13 +3173,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47002 error = mnt_want_write(new_path.mnt);
47003 if (error)
47004 goto out_dput;
47005+
47006+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
47007+ old_path.dentry->d_inode,
47008+ old_path.dentry->d_inode->i_mode, to)) {
47009+ error = -EACCES;
47010+ goto out_drop_write;
47011+ }
47012+
47013+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
47014+ old_path.dentry, old_path.mnt, to)) {
47015+ error = -EACCES;
47016+ goto out_drop_write;
47017+ }
47018+
47019 error = security_path_link(old_path.dentry, &new_path, new_dentry);
47020 if (error)
47021 goto out_drop_write;
47022 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
47023+ if (!error)
47024+ gr_handle_create(new_dentry, new_path.mnt);
47025 out_drop_write:
47026 mnt_drop_write(new_path.mnt);
47027 out_dput:
47028+ putname(to);
47029 dput(new_dentry);
47030 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
47031 path_put(&new_path);
47032@@ -3231,6 +3424,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47033 if (new_dentry == trap)
47034 goto exit5;
47035
47036+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
47037+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
47038+ to);
47039+ if (error)
47040+ goto exit5;
47041+
47042 error = mnt_want_write(oldnd.path.mnt);
47043 if (error)
47044 goto exit5;
47045@@ -3240,6 +3439,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47046 goto exit6;
47047 error = vfs_rename(old_dir->d_inode, old_dentry,
47048 new_dir->d_inode, new_dentry);
47049+ if (!error)
47050+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
47051+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
47052 exit6:
47053 mnt_drop_write(oldnd.path.mnt);
47054 exit5:
47055@@ -3265,6 +3467,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
47056
47057 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
47058 {
47059+ char tmpbuf[64];
47060+ const char *newlink;
47061 int len;
47062
47063 len = PTR_ERR(link);
47064@@ -3274,7 +3478,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
47065 len = strlen(link);
47066 if (len > (unsigned) buflen)
47067 len = buflen;
47068- if (copy_to_user(buffer, link, len))
47069+
47070+ if (len < sizeof(tmpbuf)) {
47071+ memcpy(tmpbuf, link, len);
47072+ newlink = tmpbuf;
47073+ } else
47074+ newlink = link;
47075+
47076+ if (copy_to_user(buffer, newlink, len))
47077 len = -EFAULT;
47078 out:
47079 return len;
47080diff --git a/fs/namespace.c b/fs/namespace.c
47081index e608199..9609cb9 100644
47082--- a/fs/namespace.c
47083+++ b/fs/namespace.c
47084@@ -1155,6 +1155,9 @@ static int do_umount(struct mount *mnt, int flags)
47085 if (!(sb->s_flags & MS_RDONLY))
47086 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47087 up_write(&sb->s_umount);
47088+
47089+ gr_log_remount(mnt->mnt_devname, retval);
47090+
47091 return retval;
47092 }
47093
47094@@ -1174,6 +1177,9 @@ static int do_umount(struct mount *mnt, int flags)
47095 br_write_unlock(vfsmount_lock);
47096 up_write(&namespace_sem);
47097 release_mounts(&umount_list);
47098+
47099+ gr_log_unmount(mnt->mnt_devname, retval);
47100+
47101 return retval;
47102 }
47103
47104@@ -2175,6 +2181,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47105 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47106 MS_STRICTATIME);
47107
47108+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47109+ retval = -EPERM;
47110+ goto dput_out;
47111+ }
47112+
47113+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47114+ retval = -EPERM;
47115+ goto dput_out;
47116+ }
47117+
47118 if (flags & MS_REMOUNT)
47119 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47120 data_page);
47121@@ -2189,6 +2205,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47122 dev_name, data_page);
47123 dput_out:
47124 path_put(&path);
47125+
47126+ gr_log_mount(dev_name, dir_name, retval);
47127+
47128 return retval;
47129 }
47130
47131@@ -2470,6 +2489,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
47132 if (error)
47133 goto out2;
47134
47135+ if (gr_handle_chroot_pivot()) {
47136+ error = -EPERM;
47137+ goto out2;
47138+ }
47139+
47140 get_fs_root(current->fs, &root);
47141 error = lock_mount(&old);
47142 if (error)
47143diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
47144index 32c0658..b1c2045e 100644
47145--- a/fs/ncpfs/ncplib_kernel.h
47146+++ b/fs/ncpfs/ncplib_kernel.h
47147@@ -130,7 +130,7 @@ static inline int ncp_is_nfs_extras(struct ncp_server* server, unsigned int voln
47148 int ncp__io2vol(struct ncp_server *, unsigned char *, unsigned int *,
47149 const unsigned char *, unsigned int, int);
47150 int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
47151- const unsigned char *, unsigned int, int);
47152+ const unsigned char *, unsigned int, int) __size_overflow(5);
47153
47154 #define NCP_ESC ':'
47155 #define NCP_IO_TABLE(sb) (NCP_SBP(sb)->nls_io)
47156@@ -146,7 +146,7 @@ int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
47157 int ncp__io2vol(unsigned char *, unsigned int *,
47158 const unsigned char *, unsigned int, int);
47159 int ncp__vol2io(unsigned char *, unsigned int *,
47160- const unsigned char *, unsigned int, int);
47161+ const unsigned char *, unsigned int, int) __size_overflow(5);
47162
47163 #define NCP_IO_TABLE(sb) NULL
47164 #define ncp_tolower(t, c) tolower(c)
47165diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
47166index f649fba..236bf92 100644
47167--- a/fs/nfs/inode.c
47168+++ b/fs/nfs/inode.c
47169@@ -151,7 +151,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
47170 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47171 nfsi->attrtimeo_timestamp = jiffies;
47172
47173- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47174+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47175 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47176 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47177 else
47178@@ -1003,16 +1003,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
47179 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47180 }
47181
47182-static atomic_long_t nfs_attr_generation_counter;
47183+static atomic_long_unchecked_t nfs_attr_generation_counter;
47184
47185 static unsigned long nfs_read_attr_generation_counter(void)
47186 {
47187- return atomic_long_read(&nfs_attr_generation_counter);
47188+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47189 }
47190
47191 unsigned long nfs_inc_attr_generation_counter(void)
47192 {
47193- return atomic_long_inc_return(&nfs_attr_generation_counter);
47194+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47195 }
47196
47197 void nfs_fattr_init(struct nfs_fattr *fattr)
47198diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
47199index edf6d3e..bdd1da7 100644
47200--- a/fs/nfsd/vfs.c
47201+++ b/fs/nfsd/vfs.c
47202@@ -925,7 +925,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47203 } else {
47204 oldfs = get_fs();
47205 set_fs(KERNEL_DS);
47206- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
47207+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
47208 set_fs(oldfs);
47209 }
47210
47211@@ -1029,7 +1029,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47212
47213 /* Write the data. */
47214 oldfs = get_fs(); set_fs(KERNEL_DS);
47215- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
47216+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
47217 set_fs(oldfs);
47218 if (host_err < 0)
47219 goto out_nfserr;
47220@@ -1564,7 +1564,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
47221 */
47222
47223 oldfs = get_fs(); set_fs(KERNEL_DS);
47224- host_err = inode->i_op->readlink(dentry, buf, *lenp);
47225+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
47226 set_fs(oldfs);
47227
47228 if (host_err < 0)
47229diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
47230index 3568c8a..e0240d8 100644
47231--- a/fs/notify/fanotify/fanotify_user.c
47232+++ b/fs/notify/fanotify/fanotify_user.c
47233@@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
47234 goto out_close_fd;
47235
47236 ret = -EFAULT;
47237- if (copy_to_user(buf, &fanotify_event_metadata,
47238+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
47239+ copy_to_user(buf, &fanotify_event_metadata,
47240 fanotify_event_metadata.event_len))
47241 goto out_kill_access_response;
47242
47243diff --git a/fs/notify/notification.c b/fs/notify/notification.c
47244index ee18815..7aa5d01 100644
47245--- a/fs/notify/notification.c
47246+++ b/fs/notify/notification.c
47247@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
47248 * get set to 0 so it will never get 'freed'
47249 */
47250 static struct fsnotify_event *q_overflow_event;
47251-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47252+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47253
47254 /**
47255 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
47256@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47257 */
47258 u32 fsnotify_get_cookie(void)
47259 {
47260- return atomic_inc_return(&fsnotify_sync_cookie);
47261+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47262 }
47263 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47264
47265diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
47266index 99e3610..02c1068 100644
47267--- a/fs/ntfs/dir.c
47268+++ b/fs/ntfs/dir.c
47269@@ -1329,7 +1329,7 @@ find_next_index_buffer:
47270 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
47271 ~(s64)(ndir->itype.index.block_size - 1)));
47272 /* Bounds checks. */
47273- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47274+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47275 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
47276 "inode 0x%lx or driver bug.", vdir->i_ino);
47277 goto err_out;
47278diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
47279index c587e2d..3641eaa 100644
47280--- a/fs/ntfs/file.c
47281+++ b/fs/ntfs/file.c
47282@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
47283 #endif /* NTFS_RW */
47284 };
47285
47286-const struct file_operations ntfs_empty_file_ops = {};
47287+const struct file_operations ntfs_empty_file_ops __read_only;
47288
47289-const struct inode_operations ntfs_empty_inode_ops = {};
47290+const struct inode_operations ntfs_empty_inode_ops __read_only;
47291diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
47292index 210c352..a174f83 100644
47293--- a/fs/ocfs2/localalloc.c
47294+++ b/fs/ocfs2/localalloc.c
47295@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
47296 goto bail;
47297 }
47298
47299- atomic_inc(&osb->alloc_stats.moves);
47300+ atomic_inc_unchecked(&osb->alloc_stats.moves);
47301
47302 bail:
47303 if (handle)
47304diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
47305index d355e6e..578d905 100644
47306--- a/fs/ocfs2/ocfs2.h
47307+++ b/fs/ocfs2/ocfs2.h
47308@@ -235,11 +235,11 @@ enum ocfs2_vol_state
47309
47310 struct ocfs2_alloc_stats
47311 {
47312- atomic_t moves;
47313- atomic_t local_data;
47314- atomic_t bitmap_data;
47315- atomic_t bg_allocs;
47316- atomic_t bg_extends;
47317+ atomic_unchecked_t moves;
47318+ atomic_unchecked_t local_data;
47319+ atomic_unchecked_t bitmap_data;
47320+ atomic_unchecked_t bg_allocs;
47321+ atomic_unchecked_t bg_extends;
47322 };
47323
47324 enum ocfs2_local_alloc_state
47325diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
47326index ba5d97e..c77db25 100644
47327--- a/fs/ocfs2/suballoc.c
47328+++ b/fs/ocfs2/suballoc.c
47329@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
47330 mlog_errno(status);
47331 goto bail;
47332 }
47333- atomic_inc(&osb->alloc_stats.bg_extends);
47334+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
47335
47336 /* You should never ask for this much metadata */
47337 BUG_ON(bits_wanted >
47338@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
47339 mlog_errno(status);
47340 goto bail;
47341 }
47342- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47343+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47344
47345 *suballoc_loc = res.sr_bg_blkno;
47346 *suballoc_bit_start = res.sr_bit_offset;
47347@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
47348 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
47349 res->sr_bits);
47350
47351- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47352+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47353
47354 BUG_ON(res->sr_bits != 1);
47355
47356@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
47357 mlog_errno(status);
47358 goto bail;
47359 }
47360- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47361+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47362
47363 BUG_ON(res.sr_bits != 1);
47364
47365@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47366 cluster_start,
47367 num_clusters);
47368 if (!status)
47369- atomic_inc(&osb->alloc_stats.local_data);
47370+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
47371 } else {
47372 if (min_clusters > (osb->bitmap_cpg - 1)) {
47373 /* The only paths asking for contiguousness
47374@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47375 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
47376 res.sr_bg_blkno,
47377 res.sr_bit_offset);
47378- atomic_inc(&osb->alloc_stats.bitmap_data);
47379+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
47380 *num_clusters = res.sr_bits;
47381 }
47382 }
47383diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
47384index 604e12c..8426483 100644
47385--- a/fs/ocfs2/super.c
47386+++ b/fs/ocfs2/super.c
47387@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
47388 "%10s => GlobalAllocs: %d LocalAllocs: %d "
47389 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
47390 "Stats",
47391- atomic_read(&osb->alloc_stats.bitmap_data),
47392- atomic_read(&osb->alloc_stats.local_data),
47393- atomic_read(&osb->alloc_stats.bg_allocs),
47394- atomic_read(&osb->alloc_stats.moves),
47395- atomic_read(&osb->alloc_stats.bg_extends));
47396+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
47397+ atomic_read_unchecked(&osb->alloc_stats.local_data),
47398+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
47399+ atomic_read_unchecked(&osb->alloc_stats.moves),
47400+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
47401
47402 out += snprintf(buf + out, len - out,
47403 "%10s => State: %u Descriptor: %llu Size: %u bits "
47404@@ -2117,11 +2117,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
47405 spin_lock_init(&osb->osb_xattr_lock);
47406 ocfs2_init_steal_slots(osb);
47407
47408- atomic_set(&osb->alloc_stats.moves, 0);
47409- atomic_set(&osb->alloc_stats.local_data, 0);
47410- atomic_set(&osb->alloc_stats.bitmap_data, 0);
47411- atomic_set(&osb->alloc_stats.bg_allocs, 0);
47412- atomic_set(&osb->alloc_stats.bg_extends, 0);
47413+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
47414+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
47415+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
47416+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
47417+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
47418
47419 /* Copy the blockcheck stats from the superblock probe */
47420 osb->osb_ecc_stats = *stats;
47421diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
47422index 5d22872..523db20 100644
47423--- a/fs/ocfs2/symlink.c
47424+++ b/fs/ocfs2/symlink.c
47425@@ -142,7 +142,7 @@ bail:
47426
47427 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47428 {
47429- char *link = nd_get_link(nd);
47430+ const char *link = nd_get_link(nd);
47431 if (!IS_ERR(link))
47432 kfree(link);
47433 }
47434diff --git a/fs/open.c b/fs/open.c
47435index 77becc0..aad7bd9 100644
47436--- a/fs/open.c
47437+++ b/fs/open.c
47438@@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
47439 error = locks_verify_truncate(inode, NULL, length);
47440 if (!error)
47441 error = security_path_truncate(&path);
47442+
47443+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
47444+ error = -EACCES;
47445+
47446 if (!error)
47447 error = do_truncate(path.dentry, length, 0, NULL);
47448
47449@@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
47450 if (__mnt_is_readonly(path.mnt))
47451 res = -EROFS;
47452
47453+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
47454+ res = -EACCES;
47455+
47456 out_path_release:
47457 path_put(&path);
47458 out:
47459@@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
47460 if (error)
47461 goto dput_and_out;
47462
47463+ gr_log_chdir(path.dentry, path.mnt);
47464+
47465 set_fs_pwd(current->fs, &path);
47466
47467 dput_and_out:
47468@@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
47469 goto out_putf;
47470
47471 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
47472+
47473+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
47474+ error = -EPERM;
47475+
47476+ if (!error)
47477+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
47478+
47479 if (!error)
47480 set_fs_pwd(current->fs, &file->f_path);
47481 out_putf:
47482@@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
47483 if (error)
47484 goto dput_and_out;
47485
47486+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
47487+ goto dput_and_out;
47488+
47489 set_fs_root(current->fs, &path);
47490+
47491+ gr_handle_chroot_chdir(&path);
47492+
47493 error = 0;
47494 dput_and_out:
47495 path_put(&path);
47496@@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
47497 if (error)
47498 return error;
47499 mutex_lock(&inode->i_mutex);
47500+
47501+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
47502+ error = -EACCES;
47503+ goto out_unlock;
47504+ }
47505+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
47506+ error = -EACCES;
47507+ goto out_unlock;
47508+ }
47509+
47510 error = security_path_chmod(path, mode);
47511 if (error)
47512 goto out_unlock;
47513@@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
47514 int error;
47515 struct iattr newattrs;
47516
47517+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
47518+ return -EACCES;
47519+
47520 newattrs.ia_valid = ATTR_CTIME;
47521 if (user != (uid_t) -1) {
47522 newattrs.ia_valid |= ATTR_UID;
47523diff --git a/fs/pipe.c b/fs/pipe.c
47524index a932ced..6495412 100644
47525--- a/fs/pipe.c
47526+++ b/fs/pipe.c
47527@@ -420,9 +420,9 @@ redo:
47528 }
47529 if (bufs) /* More to do? */
47530 continue;
47531- if (!pipe->writers)
47532+ if (!atomic_read(&pipe->writers))
47533 break;
47534- if (!pipe->waiting_writers) {
47535+ if (!atomic_read(&pipe->waiting_writers)) {
47536 /* syscall merging: Usually we must not sleep
47537 * if O_NONBLOCK is set, or if we got some data.
47538 * But if a writer sleeps in kernel space, then
47539@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
47540 mutex_lock(&inode->i_mutex);
47541 pipe = inode->i_pipe;
47542
47543- if (!pipe->readers) {
47544+ if (!atomic_read(&pipe->readers)) {
47545 send_sig(SIGPIPE, current, 0);
47546 ret = -EPIPE;
47547 goto out;
47548@@ -530,7 +530,7 @@ redo1:
47549 for (;;) {
47550 int bufs;
47551
47552- if (!pipe->readers) {
47553+ if (!atomic_read(&pipe->readers)) {
47554 send_sig(SIGPIPE, current, 0);
47555 if (!ret)
47556 ret = -EPIPE;
47557@@ -616,9 +616,9 @@ redo2:
47558 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47559 do_wakeup = 0;
47560 }
47561- pipe->waiting_writers++;
47562+ atomic_inc(&pipe->waiting_writers);
47563 pipe_wait(pipe);
47564- pipe->waiting_writers--;
47565+ atomic_dec(&pipe->waiting_writers);
47566 }
47567 out:
47568 mutex_unlock(&inode->i_mutex);
47569@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47570 mask = 0;
47571 if (filp->f_mode & FMODE_READ) {
47572 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47573- if (!pipe->writers && filp->f_version != pipe->w_counter)
47574+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47575 mask |= POLLHUP;
47576 }
47577
47578@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47579 * Most Unices do not set POLLERR for FIFOs but on Linux they
47580 * behave exactly like pipes for poll().
47581 */
47582- if (!pipe->readers)
47583+ if (!atomic_read(&pipe->readers))
47584 mask |= POLLERR;
47585 }
47586
47587@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47588
47589 mutex_lock(&inode->i_mutex);
47590 pipe = inode->i_pipe;
47591- pipe->readers -= decr;
47592- pipe->writers -= decw;
47593+ atomic_sub(decr, &pipe->readers);
47594+ atomic_sub(decw, &pipe->writers);
47595
47596- if (!pipe->readers && !pipe->writers) {
47597+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47598 free_pipe_info(inode);
47599 } else {
47600 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
47601@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47602
47603 if (inode->i_pipe) {
47604 ret = 0;
47605- inode->i_pipe->readers++;
47606+ atomic_inc(&inode->i_pipe->readers);
47607 }
47608
47609 mutex_unlock(&inode->i_mutex);
47610@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47611
47612 if (inode->i_pipe) {
47613 ret = 0;
47614- inode->i_pipe->writers++;
47615+ atomic_inc(&inode->i_pipe->writers);
47616 }
47617
47618 mutex_unlock(&inode->i_mutex);
47619@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47620 if (inode->i_pipe) {
47621 ret = 0;
47622 if (filp->f_mode & FMODE_READ)
47623- inode->i_pipe->readers++;
47624+ atomic_inc(&inode->i_pipe->readers);
47625 if (filp->f_mode & FMODE_WRITE)
47626- inode->i_pipe->writers++;
47627+ atomic_inc(&inode->i_pipe->writers);
47628 }
47629
47630 mutex_unlock(&inode->i_mutex);
47631@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
47632 inode->i_pipe = NULL;
47633 }
47634
47635-static struct vfsmount *pipe_mnt __read_mostly;
47636+struct vfsmount *pipe_mnt __read_mostly;
47637
47638 /*
47639 * pipefs_dname() is called from d_path().
47640@@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(void)
47641 goto fail_iput;
47642 inode->i_pipe = pipe;
47643
47644- pipe->readers = pipe->writers = 1;
47645+ atomic_set(&pipe->readers, 1);
47646+ atomic_set(&pipe->writers, 1);
47647 inode->i_fop = &rdwr_pipefifo_fops;
47648
47649 /*
47650diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47651index 15af622..0e9f4467 100644
47652--- a/fs/proc/Kconfig
47653+++ b/fs/proc/Kconfig
47654@@ -30,12 +30,12 @@ config PROC_FS
47655
47656 config PROC_KCORE
47657 bool "/proc/kcore support" if !ARM
47658- depends on PROC_FS && MMU
47659+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47660
47661 config PROC_VMCORE
47662 bool "/proc/vmcore support"
47663- depends on PROC_FS && CRASH_DUMP
47664- default y
47665+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47666+ default n
47667 help
47668 Exports the dump image of crashed kernel in ELF format.
47669
47670@@ -59,8 +59,8 @@ config PROC_SYSCTL
47671 limited in memory.
47672
47673 config PROC_PAGE_MONITOR
47674- default y
47675- depends on PROC_FS && MMU
47676+ default n
47677+ depends on PROC_FS && MMU && !GRKERNSEC
47678 bool "Enable /proc page monitoring" if EXPERT
47679 help
47680 Various /proc files exist to monitor process memory utilization:
47681diff --git a/fs/proc/array.c b/fs/proc/array.c
47682index c602b8d..a7de642 100644
47683--- a/fs/proc/array.c
47684+++ b/fs/proc/array.c
47685@@ -60,6 +60,7 @@
47686 #include <linux/tty.h>
47687 #include <linux/string.h>
47688 #include <linux/mman.h>
47689+#include <linux/grsecurity.h>
47690 #include <linux/proc_fs.h>
47691 #include <linux/ioport.h>
47692 #include <linux/uaccess.h>
47693@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
47694 seq_putc(m, '\n');
47695 }
47696
47697+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47698+static inline void task_pax(struct seq_file *m, struct task_struct *p)
47699+{
47700+ if (p->mm)
47701+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47702+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47703+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47704+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47705+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47706+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47707+ else
47708+ seq_printf(m, "PaX:\t-----\n");
47709+}
47710+#endif
47711+
47712 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47713 struct pid *pid, struct task_struct *task)
47714 {
47715@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47716 task_cpus_allowed(m, task);
47717 cpuset_task_status_allowed(m, task);
47718 task_context_switch_counts(m, task);
47719+
47720+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47721+ task_pax(m, task);
47722+#endif
47723+
47724+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47725+ task_grsec_rbac(m, task);
47726+#endif
47727+
47728 return 0;
47729 }
47730
47731+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47732+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47733+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
47734+ _mm->pax_flags & MF_PAX_SEGMEXEC))
47735+#endif
47736+
47737 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47738 struct pid *pid, struct task_struct *task, int whole)
47739 {
47740@@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47741 char tcomm[sizeof(task->comm)];
47742 unsigned long flags;
47743
47744+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47745+ if (current->exec_id != m->exec_id) {
47746+ gr_log_badprocpid("stat");
47747+ return 0;
47748+ }
47749+#endif
47750+
47751 state = *get_task_state(task);
47752 vsize = eip = esp = 0;
47753 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47754@@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47755 gtime = task->gtime;
47756 }
47757
47758+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47759+ if (PAX_RAND_FLAGS(mm)) {
47760+ eip = 0;
47761+ esp = 0;
47762+ wchan = 0;
47763+ }
47764+#endif
47765+#ifdef CONFIG_GRKERNSEC_HIDESYM
47766+ wchan = 0;
47767+ eip =0;
47768+ esp =0;
47769+#endif
47770+
47771 /* scale priority and nice values from timeslices to -20..20 */
47772 /* to make it look like a "normal" Unix priority/nice value */
47773 priority = task_prio(task);
47774@@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47775 vsize,
47776 mm ? get_mm_rss(mm) : 0,
47777 rsslim,
47778+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47779+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
47780+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
47781+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
47782+#else
47783 mm ? (permitted ? mm->start_code : 1) : 0,
47784 mm ? (permitted ? mm->end_code : 1) : 0,
47785 (permitted && mm) ? mm->start_stack : 0,
47786+#endif
47787 esp,
47788 eip,
47789 /* The signal information here is obsolete.
47790@@ -536,8 +593,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47791 struct pid *pid, struct task_struct *task)
47792 {
47793 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
47794- struct mm_struct *mm = get_task_mm(task);
47795+ struct mm_struct *mm;
47796
47797+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47798+ if (current->exec_id != m->exec_id) {
47799+ gr_log_badprocpid("statm");
47800+ return 0;
47801+ }
47802+#endif
47803+ mm = get_task_mm(task);
47804 if (mm) {
47805 size = task_statm(mm, &shared, &text, &data, &resident);
47806 mmput(mm);
47807@@ -547,3 +611,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47808
47809 return 0;
47810 }
47811+
47812+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47813+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47814+{
47815+ u32 curr_ip = 0;
47816+ unsigned long flags;
47817+
47818+ if (lock_task_sighand(task, &flags)) {
47819+ curr_ip = task->signal->curr_ip;
47820+ unlock_task_sighand(task, &flags);
47821+ }
47822+
47823+ return sprintf(buffer, "%pI4\n", &curr_ip);
47824+}
47825+#endif
47826diff --git a/fs/proc/base.c b/fs/proc/base.c
47827index d4548dd..d101f84 100644
47828--- a/fs/proc/base.c
47829+++ b/fs/proc/base.c
47830@@ -109,6 +109,14 @@ struct pid_entry {
47831 union proc_op op;
47832 };
47833
47834+struct getdents_callback {
47835+ struct linux_dirent __user * current_dir;
47836+ struct linux_dirent __user * previous;
47837+ struct file * file;
47838+ int count;
47839+ int error;
47840+};
47841+
47842 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47843 .name = (NAME), \
47844 .len = sizeof(NAME) - 1, \
47845@@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47846 if (!mm->arg_end)
47847 goto out_mm; /* Shh! No looking before we're done */
47848
47849+ if (gr_acl_handle_procpidmem(task))
47850+ goto out_mm;
47851+
47852 len = mm->arg_end - mm->arg_start;
47853
47854 if (len > PAGE_SIZE)
47855@@ -240,12 +251,28 @@ out:
47856 return res;
47857 }
47858
47859+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47860+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47861+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
47862+ _mm->pax_flags & MF_PAX_SEGMEXEC))
47863+#endif
47864+
47865 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47866 {
47867 struct mm_struct *mm = mm_for_maps(task);
47868 int res = PTR_ERR(mm);
47869 if (mm && !IS_ERR(mm)) {
47870 unsigned int nwords = 0;
47871+
47872+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47873+ /* allow if we're currently ptracing this task */
47874+ if (PAX_RAND_FLAGS(mm) &&
47875+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47876+ mmput(mm);
47877+ return 0;
47878+ }
47879+#endif
47880+
47881 do {
47882 nwords += 2;
47883 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47884@@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47885 }
47886
47887
47888-#ifdef CONFIG_KALLSYMS
47889+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47890 /*
47891 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47892 * Returns the resolved symbol. If that fails, simply return the address.
47893@@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
47894 mutex_unlock(&task->signal->cred_guard_mutex);
47895 }
47896
47897-#ifdef CONFIG_STACKTRACE
47898+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47899
47900 #define MAX_STACK_TRACE_DEPTH 64
47901
47902@@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47903 return count;
47904 }
47905
47906-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47907+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47908 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47909 {
47910 long nr;
47911@@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47912 /************************************************************************/
47913
47914 /* permission checks */
47915-static int proc_fd_access_allowed(struct inode *inode)
47916+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47917 {
47918 struct task_struct *task;
47919 int allowed = 0;
47920@@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47921 */
47922 task = get_proc_task(inode);
47923 if (task) {
47924- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47925+ if (log)
47926+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47927+ else
47928+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47929 put_task_struct(task);
47930 }
47931 return allowed;
47932@@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
47933 struct task_struct *task,
47934 int hide_pid_min)
47935 {
47936+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47937+ return false;
47938+
47939+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47940+ rcu_read_lock();
47941+ {
47942+ const struct cred *tmpcred = current_cred();
47943+ const struct cred *cred = __task_cred(task);
47944+
47945+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
47946+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47947+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47948+#endif
47949+ ) {
47950+ rcu_read_unlock();
47951+ return true;
47952+ }
47953+ }
47954+ rcu_read_unlock();
47955+
47956+ if (!pid->hide_pid)
47957+ return false;
47958+#endif
47959+
47960 if (pid->hide_pid < hide_pid_min)
47961 return true;
47962 if (in_group_p(pid->pid_gid))
47963 return true;
47964+
47965 return ptrace_may_access(task, PTRACE_MODE_READ);
47966 }
47967
47968@@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
47969 put_task_struct(task);
47970
47971 if (!has_perms) {
47972+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47973+ {
47974+#else
47975 if (pid->hide_pid == 2) {
47976+#endif
47977 /*
47978 * Let's make getdents(), stat(), and open()
47979 * consistent with each other. If a process
47980@@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
47981 file->f_mode |= FMODE_UNSIGNED_OFFSET;
47982 file->private_data = mm;
47983
47984+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47985+ file->f_version = current->exec_id;
47986+#endif
47987+
47988 return 0;
47989 }
47990
47991@@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
47992 ssize_t copied;
47993 char *page;
47994
47995+#ifdef CONFIG_GRKERNSEC
47996+ if (write)
47997+ return -EPERM;
47998+#endif
47999+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48000+ if (file->f_version != current->exec_id) {
48001+ gr_log_badprocpid("mem");
48002+ return 0;
48003+ }
48004+#endif
48005+
48006 if (!mm)
48007 return 0;
48008
48009@@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
48010 if (!task)
48011 goto out_no_task;
48012
48013+ if (gr_acl_handle_procpidmem(task))
48014+ goto out;
48015+
48016 ret = -ENOMEM;
48017 page = (char *)__get_free_page(GFP_TEMPORARY);
48018 if (!page)
48019@@ -1434,7 +1511,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
48020 path_put(&nd->path);
48021
48022 /* Are we allowed to snoop on the tasks file descriptors? */
48023- if (!proc_fd_access_allowed(inode))
48024+ if (!proc_fd_access_allowed(inode, 0))
48025 goto out;
48026
48027 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
48028@@ -1473,8 +1550,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
48029 struct path path;
48030
48031 /* Are we allowed to snoop on the tasks file descriptors? */
48032- if (!proc_fd_access_allowed(inode))
48033- goto out;
48034+ /* logging this is needed for learning on chromium to work properly,
48035+ but we don't want to flood the logs from 'ps' which does a readlink
48036+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
48037+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
48038+ */
48039+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48040+ if (!proc_fd_access_allowed(inode,0))
48041+ goto out;
48042+ } else {
48043+ if (!proc_fd_access_allowed(inode,1))
48044+ goto out;
48045+ }
48046
48047 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
48048 if (error)
48049@@ -1539,7 +1626,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
48050 rcu_read_lock();
48051 cred = __task_cred(task);
48052 inode->i_uid = cred->euid;
48053+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48054+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48055+#else
48056 inode->i_gid = cred->egid;
48057+#endif
48058 rcu_read_unlock();
48059 }
48060 security_task_to_inode(task, inode);
48061@@ -1575,10 +1666,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48062 return -ENOENT;
48063 }
48064 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48065+#ifdef CONFIG_GRKERNSEC_PROC_USER
48066+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48067+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48068+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48069+#endif
48070 task_dumpable(task)) {
48071 cred = __task_cred(task);
48072 stat->uid = cred->euid;
48073+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48074+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48075+#else
48076 stat->gid = cred->egid;
48077+#endif
48078 }
48079 }
48080 rcu_read_unlock();
48081@@ -1616,11 +1716,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
48082
48083 if (task) {
48084 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48085+#ifdef CONFIG_GRKERNSEC_PROC_USER
48086+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48087+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48088+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48089+#endif
48090 task_dumpable(task)) {
48091 rcu_read_lock();
48092 cred = __task_cred(task);
48093 inode->i_uid = cred->euid;
48094+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48095+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48096+#else
48097 inode->i_gid = cred->egid;
48098+#endif
48099 rcu_read_unlock();
48100 } else {
48101 inode->i_uid = 0;
48102@@ -1738,7 +1847,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
48103 int fd = proc_fd(inode);
48104
48105 if (task) {
48106- files = get_files_struct(task);
48107+ if (!gr_acl_handle_procpidmem(task))
48108+ files = get_files_struct(task);
48109 put_task_struct(task);
48110 }
48111 if (files) {
48112@@ -2355,11 +2465,21 @@ static const struct file_operations proc_map_files_operations = {
48113 */
48114 static int proc_fd_permission(struct inode *inode, int mask)
48115 {
48116+ struct task_struct *task;
48117 int rv = generic_permission(inode, mask);
48118- if (rv == 0)
48119- return 0;
48120+
48121 if (task_pid(current) == proc_pid(inode))
48122 rv = 0;
48123+
48124+ task = get_proc_task(inode);
48125+ if (task == NULL)
48126+ return rv;
48127+
48128+ if (gr_acl_handle_procpidmem(task))
48129+ rv = -EACCES;
48130+
48131+ put_task_struct(task);
48132+
48133 return rv;
48134 }
48135
48136@@ -2469,6 +2589,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
48137 if (!task)
48138 goto out_no_task;
48139
48140+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48141+ goto out;
48142+
48143 /*
48144 * Yes, it does not scale. And it should not. Don't add
48145 * new entries into /proc/<tgid>/ without very good reasons.
48146@@ -2513,6 +2636,9 @@ static int proc_pident_readdir(struct file *filp,
48147 if (!task)
48148 goto out_no_task;
48149
48150+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48151+ goto out;
48152+
48153 ret = 0;
48154 i = filp->f_pos;
48155 switch (i) {
48156@@ -2783,7 +2909,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
48157 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48158 void *cookie)
48159 {
48160- char *s = nd_get_link(nd);
48161+ const char *s = nd_get_link(nd);
48162 if (!IS_ERR(s))
48163 __putname(s);
48164 }
48165@@ -2984,7 +3110,7 @@ static const struct pid_entry tgid_base_stuff[] = {
48166 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
48167 #endif
48168 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48169-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48170+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48171 INF("syscall", S_IRUGO, proc_pid_syscall),
48172 #endif
48173 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48174@@ -3009,10 +3135,10 @@ static const struct pid_entry tgid_base_stuff[] = {
48175 #ifdef CONFIG_SECURITY
48176 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48177 #endif
48178-#ifdef CONFIG_KALLSYMS
48179+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48180 INF("wchan", S_IRUGO, proc_pid_wchan),
48181 #endif
48182-#ifdef CONFIG_STACKTRACE
48183+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48184 ONE("stack", S_IRUGO, proc_pid_stack),
48185 #endif
48186 #ifdef CONFIG_SCHEDSTATS
48187@@ -3046,6 +3172,9 @@ static const struct pid_entry tgid_base_stuff[] = {
48188 #ifdef CONFIG_HARDWALL
48189 INF("hardwall", S_IRUGO, proc_pid_hardwall),
48190 #endif
48191+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48192+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
48193+#endif
48194 };
48195
48196 static int proc_tgid_base_readdir(struct file * filp,
48197@@ -3172,7 +3301,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
48198 if (!inode)
48199 goto out;
48200
48201+#ifdef CONFIG_GRKERNSEC_PROC_USER
48202+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
48203+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48204+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48205+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
48206+#else
48207 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
48208+#endif
48209 inode->i_op = &proc_tgid_base_inode_operations;
48210 inode->i_fop = &proc_tgid_base_operations;
48211 inode->i_flags|=S_IMMUTABLE;
48212@@ -3214,7 +3350,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
48213 if (!task)
48214 goto out;
48215
48216+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48217+ goto out_put_task;
48218+
48219 result = proc_pid_instantiate(dir, dentry, task, NULL);
48220+out_put_task:
48221 put_task_struct(task);
48222 out:
48223 return result;
48224@@ -3277,6 +3417,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
48225 static int fake_filldir(void *buf, const char *name, int namelen,
48226 loff_t offset, u64 ino, unsigned d_type)
48227 {
48228+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
48229+ __buf->error = -EINVAL;
48230 return 0;
48231 }
48232
48233@@ -3343,7 +3485,7 @@ static const struct pid_entry tid_base_stuff[] = {
48234 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
48235 #endif
48236 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48237-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48238+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48239 INF("syscall", S_IRUGO, proc_pid_syscall),
48240 #endif
48241 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48242@@ -3367,10 +3509,10 @@ static const struct pid_entry tid_base_stuff[] = {
48243 #ifdef CONFIG_SECURITY
48244 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48245 #endif
48246-#ifdef CONFIG_KALLSYMS
48247+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48248 INF("wchan", S_IRUGO, proc_pid_wchan),
48249 #endif
48250-#ifdef CONFIG_STACKTRACE
48251+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48252 ONE("stack", S_IRUGO, proc_pid_stack),
48253 #endif
48254 #ifdef CONFIG_SCHEDSTATS
48255diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
48256index 82676e3..5f8518a 100644
48257--- a/fs/proc/cmdline.c
48258+++ b/fs/proc/cmdline.c
48259@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
48260
48261 static int __init proc_cmdline_init(void)
48262 {
48263+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48264+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
48265+#else
48266 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
48267+#endif
48268 return 0;
48269 }
48270 module_init(proc_cmdline_init);
48271diff --git a/fs/proc/devices.c b/fs/proc/devices.c
48272index b143471..bb105e5 100644
48273--- a/fs/proc/devices.c
48274+++ b/fs/proc/devices.c
48275@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
48276
48277 static int __init proc_devices_init(void)
48278 {
48279+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48280+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
48281+#else
48282 proc_create("devices", 0, NULL, &proc_devinfo_operations);
48283+#endif
48284 return 0;
48285 }
48286 module_init(proc_devices_init);
48287diff --git a/fs/proc/inode.c b/fs/proc/inode.c
48288index 84fd323..f698a32 100644
48289--- a/fs/proc/inode.c
48290+++ b/fs/proc/inode.c
48291@@ -21,12 +21,18 @@
48292 #include <linux/seq_file.h>
48293 #include <linux/slab.h>
48294 #include <linux/mount.h>
48295+#include <linux/grsecurity.h>
48296
48297 #include <asm/system.h>
48298 #include <asm/uaccess.h>
48299
48300 #include "internal.h"
48301
48302+#ifdef CONFIG_PROC_SYSCTL
48303+extern const struct inode_operations proc_sys_inode_operations;
48304+extern const struct inode_operations proc_sys_dir_operations;
48305+#endif
48306+
48307 static void proc_evict_inode(struct inode *inode)
48308 {
48309 struct proc_dir_entry *de;
48310@@ -52,6 +58,13 @@ static void proc_evict_inode(struct inode *inode)
48311 ns_ops = PROC_I(inode)->ns_ops;
48312 if (ns_ops && ns_ops->put)
48313 ns_ops->put(PROC_I(inode)->ns);
48314+
48315+#ifdef CONFIG_PROC_SYSCTL
48316+ if (inode->i_op == &proc_sys_inode_operations ||
48317+ inode->i_op == &proc_sys_dir_operations)
48318+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
48319+#endif
48320+
48321 }
48322
48323 static struct kmem_cache * proc_inode_cachep;
48324@@ -457,7 +470,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
48325 if (de->mode) {
48326 inode->i_mode = de->mode;
48327 inode->i_uid = de->uid;
48328+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48329+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48330+#else
48331 inode->i_gid = de->gid;
48332+#endif
48333 }
48334 if (de->size)
48335 inode->i_size = de->size;
48336diff --git a/fs/proc/internal.h b/fs/proc/internal.h
48337index 2925775..4f08fae 100644
48338--- a/fs/proc/internal.h
48339+++ b/fs/proc/internal.h
48340@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48341 struct pid *pid, struct task_struct *task);
48342 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48343 struct pid *pid, struct task_struct *task);
48344+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48345+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
48346+#endif
48347 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
48348
48349 extern const struct file_operations proc_maps_operations;
48350diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
48351index d245cb2..f4e8498 100644
48352--- a/fs/proc/kcore.c
48353+++ b/fs/proc/kcore.c
48354@@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48355 * the addresses in the elf_phdr on our list.
48356 */
48357 start = kc_offset_to_vaddr(*fpos - elf_buflen);
48358- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
48359+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
48360+ if (tsz > buflen)
48361 tsz = buflen;
48362-
48363+
48364 while (buflen) {
48365 struct kcore_list *m;
48366
48367@@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48368 kfree(elf_buf);
48369 } else {
48370 if (kern_addr_valid(start)) {
48371- unsigned long n;
48372+ char *elf_buf;
48373+ mm_segment_t oldfs;
48374
48375- n = copy_to_user(buffer, (char *)start, tsz);
48376- /*
48377- * We cannot distingush between fault on source
48378- * and fault on destination. When this happens
48379- * we clear too and hope it will trigger the
48380- * EFAULT again.
48381- */
48382- if (n) {
48383- if (clear_user(buffer + tsz - n,
48384- n))
48385+ elf_buf = kmalloc(tsz, GFP_KERNEL);
48386+ if (!elf_buf)
48387+ return -ENOMEM;
48388+ oldfs = get_fs();
48389+ set_fs(KERNEL_DS);
48390+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
48391+ set_fs(oldfs);
48392+ if (copy_to_user(buffer, elf_buf, tsz)) {
48393+ kfree(elf_buf);
48394 return -EFAULT;
48395+ }
48396 }
48397+ set_fs(oldfs);
48398+ kfree(elf_buf);
48399 } else {
48400 if (clear_user(buffer, tsz))
48401 return -EFAULT;
48402@@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48403
48404 static int open_kcore(struct inode *inode, struct file *filp)
48405 {
48406+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
48407+ return -EPERM;
48408+#endif
48409 if (!capable(CAP_SYS_RAWIO))
48410 return -EPERM;
48411 if (kcore_need_update)
48412diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
48413index 80e4645..53e5fcf 100644
48414--- a/fs/proc/meminfo.c
48415+++ b/fs/proc/meminfo.c
48416@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
48417 vmi.used >> 10,
48418 vmi.largest_chunk >> 10
48419 #ifdef CONFIG_MEMORY_FAILURE
48420- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
48421+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
48422 #endif
48423 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
48424 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
48425diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
48426index b1822dd..df622cb 100644
48427--- a/fs/proc/nommu.c
48428+++ b/fs/proc/nommu.c
48429@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
48430 if (len < 1)
48431 len = 1;
48432 seq_printf(m, "%*c", len, ' ');
48433- seq_path(m, &file->f_path, "");
48434+ seq_path(m, &file->f_path, "\n\\");
48435 }
48436
48437 seq_putc(m, '\n');
48438diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
48439index 06e1cc1..177cd98 100644
48440--- a/fs/proc/proc_net.c
48441+++ b/fs/proc/proc_net.c
48442@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
48443 struct task_struct *task;
48444 struct nsproxy *ns;
48445 struct net *net = NULL;
48446+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48447+ const struct cred *cred = current_cred();
48448+#endif
48449+
48450+#ifdef CONFIG_GRKERNSEC_PROC_USER
48451+ if (cred->fsuid)
48452+ return net;
48453+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48454+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
48455+ return net;
48456+#endif
48457
48458 rcu_read_lock();
48459 task = pid_task(proc_pid(dir), PIDTYPE_PID);
48460diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
48461index 53c3bce..10ad159 100644
48462--- a/fs/proc/proc_sysctl.c
48463+++ b/fs/proc/proc_sysctl.c
48464@@ -9,11 +9,13 @@
48465 #include <linux/namei.h>
48466 #include "internal.h"
48467
48468+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
48469+
48470 static const struct dentry_operations proc_sys_dentry_operations;
48471 static const struct file_operations proc_sys_file_operations;
48472-static const struct inode_operations proc_sys_inode_operations;
48473+const struct inode_operations proc_sys_inode_operations;
48474 static const struct file_operations proc_sys_dir_file_operations;
48475-static const struct inode_operations proc_sys_dir_operations;
48476+const struct inode_operations proc_sys_dir_operations;
48477
48478 void proc_sys_poll_notify(struct ctl_table_poll *poll)
48479 {
48480@@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
48481
48482 err = NULL;
48483 d_set_d_op(dentry, &proc_sys_dentry_operations);
48484+
48485+ gr_handle_proc_create(dentry, inode);
48486+
48487 d_add(dentry, inode);
48488
48489+ if (gr_handle_sysctl(p, MAY_EXEC))
48490+ err = ERR_PTR(-ENOENT);
48491+
48492 out:
48493 sysctl_head_finish(head);
48494 return err;
48495@@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48496 if (!table->proc_handler)
48497 goto out;
48498
48499+#ifdef CONFIG_GRKERNSEC
48500+ error = -EPERM;
48501+ if (write && !capable(CAP_SYS_ADMIN))
48502+ goto out;
48503+#endif
48504+
48505 /* careful: calling conventions are nasty here */
48506 res = count;
48507 error = table->proc_handler(table, write, buf, &res, ppos);
48508@@ -260,6 +274,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48509 return -ENOMEM;
48510 } else {
48511 d_set_d_op(child, &proc_sys_dentry_operations);
48512+
48513+ gr_handle_proc_create(child, inode);
48514+
48515 d_add(child, inode);
48516 }
48517 } else {
48518@@ -288,6 +305,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48519 if (*pos < file->f_pos)
48520 continue;
48521
48522+ if (gr_handle_sysctl(table, 0))
48523+ continue;
48524+
48525 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
48526 if (res)
48527 return res;
48528@@ -413,6 +433,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
48529 if (IS_ERR(head))
48530 return PTR_ERR(head);
48531
48532+ if (table && gr_handle_sysctl(table, MAY_EXEC))
48533+ return -ENOENT;
48534+
48535 generic_fillattr(inode, stat);
48536 if (table)
48537 stat->mode = (stat->mode & S_IFMT) | table->mode;
48538@@ -435,13 +458,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
48539 .llseek = generic_file_llseek,
48540 };
48541
48542-static const struct inode_operations proc_sys_inode_operations = {
48543+const struct inode_operations proc_sys_inode_operations = {
48544 .permission = proc_sys_permission,
48545 .setattr = proc_sys_setattr,
48546 .getattr = proc_sys_getattr,
48547 };
48548
48549-static const struct inode_operations proc_sys_dir_operations = {
48550+const struct inode_operations proc_sys_dir_operations = {
48551 .lookup = proc_sys_lookup,
48552 .permission = proc_sys_permission,
48553 .setattr = proc_sys_setattr,
48554diff --git a/fs/proc/root.c b/fs/proc/root.c
48555index 46a15d8..335631a 100644
48556--- a/fs/proc/root.c
48557+++ b/fs/proc/root.c
48558@@ -187,7 +187,15 @@ void __init proc_root_init(void)
48559 #ifdef CONFIG_PROC_DEVICETREE
48560 proc_device_tree_init();
48561 #endif
48562+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48563+#ifdef CONFIG_GRKERNSEC_PROC_USER
48564+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48565+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48566+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48567+#endif
48568+#else
48569 proc_mkdir("bus", NULL);
48570+#endif
48571 proc_sys_init();
48572 }
48573
48574diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48575index 3efa725..23c925b 100644
48576--- a/fs/proc/task_mmu.c
48577+++ b/fs/proc/task_mmu.c
48578@@ -11,6 +11,7 @@
48579 #include <linux/rmap.h>
48580 #include <linux/swap.h>
48581 #include <linux/swapops.h>
48582+#include <linux/grsecurity.h>
48583
48584 #include <asm/elf.h>
48585 #include <asm/uaccess.h>
48586@@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48587 "VmExe:\t%8lu kB\n"
48588 "VmLib:\t%8lu kB\n"
48589 "VmPTE:\t%8lu kB\n"
48590- "VmSwap:\t%8lu kB\n",
48591- hiwater_vm << (PAGE_SHIFT-10),
48592+ "VmSwap:\t%8lu kB\n"
48593+
48594+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48595+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48596+#endif
48597+
48598+ ,hiwater_vm << (PAGE_SHIFT-10),
48599 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48600 mm->locked_vm << (PAGE_SHIFT-10),
48601 mm->pinned_vm << (PAGE_SHIFT-10),
48602@@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48603 data << (PAGE_SHIFT-10),
48604 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
48605 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48606- swap << (PAGE_SHIFT-10));
48607+ swap << (PAGE_SHIFT-10)
48608+
48609+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48610+ , mm->context.user_cs_base, mm->context.user_cs_limit
48611+#endif
48612+
48613+ );
48614 }
48615
48616 unsigned long task_vsize(struct mm_struct *mm)
48617@@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
48618 return ret;
48619 }
48620
48621+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48622+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48623+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48624+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48625+#endif
48626+
48627 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48628 {
48629 struct mm_struct *mm = vma->vm_mm;
48630@@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48631 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
48632 }
48633
48634- /* We don't show the stack guard page in /proc/maps */
48635+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48636+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48637+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
48638+#else
48639 start = vma->vm_start;
48640- if (stack_guard_page_start(vma, start))
48641- start += PAGE_SIZE;
48642 end = vma->vm_end;
48643- if (stack_guard_page_end(vma, end))
48644- end -= PAGE_SIZE;
48645+#endif
48646
48647 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48648 start,
48649@@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48650 flags & VM_WRITE ? 'w' : '-',
48651 flags & VM_EXEC ? 'x' : '-',
48652 flags & VM_MAYSHARE ? 's' : 'p',
48653+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48654+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48655+#else
48656 pgoff,
48657+#endif
48658 MAJOR(dev), MINOR(dev), ino, &len);
48659
48660 /*
48661@@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48662 */
48663 if (file) {
48664 pad_len_spaces(m, len);
48665- seq_path(m, &file->f_path, "\n");
48666+ seq_path(m, &file->f_path, "\n\\");
48667 } else {
48668 const char *name = arch_vma_name(vma);
48669 if (!name) {
48670@@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
48671 if (vma->vm_start <= mm->brk &&
48672 vma->vm_end >= mm->start_brk) {
48673 name = "[heap]";
48674- } else if (vma->vm_start <= mm->start_stack &&
48675- vma->vm_end >= mm->start_stack) {
48676+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48677+ (vma->vm_start <= mm->start_stack &&
48678+ vma->vm_end >= mm->start_stack)) {
48679 name = "[stack]";
48680 }
48681 } else {
48682@@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
48683 struct proc_maps_private *priv = m->private;
48684 struct task_struct *task = priv->task;
48685
48686+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48687+ if (current->exec_id != m->exec_id) {
48688+ gr_log_badprocpid("maps");
48689+ return 0;
48690+ }
48691+#endif
48692+
48693 show_map_vma(m, vma);
48694
48695 if (m->count < m->size) /* vma is copied successfully */
48696@@ -437,12 +467,23 @@ static int show_smap(struct seq_file *m, void *v)
48697 .private = &mss,
48698 };
48699
48700+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48701+ if (current->exec_id != m->exec_id) {
48702+ gr_log_badprocpid("smaps");
48703+ return 0;
48704+ }
48705+#endif
48706 memset(&mss, 0, sizeof mss);
48707- mss.vma = vma;
48708- /* mmap_sem is held in m_start */
48709- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48710- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48711-
48712+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48713+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48714+#endif
48715+ mss.vma = vma;
48716+ /* mmap_sem is held in m_start */
48717+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48718+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48719+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48720+ }
48721+#endif
48722 show_map_vma(m, vma);
48723
48724 seq_printf(m,
48725@@ -460,7 +501,11 @@ static int show_smap(struct seq_file *m, void *v)
48726 "KernelPageSize: %8lu kB\n"
48727 "MMUPageSize: %8lu kB\n"
48728 "Locked: %8lu kB\n",
48729+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48730+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48731+#else
48732 (vma->vm_end - vma->vm_start) >> 10,
48733+#endif
48734 mss.resident >> 10,
48735 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48736 mss.shared_clean >> 10,
48737@@ -1024,6 +1069,13 @@ static int show_numa_map(struct seq_file *m, void *v)
48738 int n;
48739 char buffer[50];
48740
48741+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48742+ if (current->exec_id != m->exec_id) {
48743+ gr_log_badprocpid("numa_maps");
48744+ return 0;
48745+ }
48746+#endif
48747+
48748 if (!mm)
48749 return 0;
48750
48751@@ -1041,11 +1093,15 @@ static int show_numa_map(struct seq_file *m, void *v)
48752 mpol_to_str(buffer, sizeof(buffer), pol, 0);
48753 mpol_cond_put(pol);
48754
48755+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48756+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
48757+#else
48758 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
48759+#endif
48760
48761 if (file) {
48762 seq_printf(m, " file=");
48763- seq_path(m, &file->f_path, "\n\t= ");
48764+ seq_path(m, &file->f_path, "\n\t\\= ");
48765 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48766 seq_printf(m, " heap");
48767 } else if (vma->vm_start <= mm->start_stack &&
48768diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48769index 980de54..2a4db5f 100644
48770--- a/fs/proc/task_nommu.c
48771+++ b/fs/proc/task_nommu.c
48772@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48773 else
48774 bytes += kobjsize(mm);
48775
48776- if (current->fs && current->fs->users > 1)
48777+ if (current->fs && atomic_read(&current->fs->users) > 1)
48778 sbytes += kobjsize(current->fs);
48779 else
48780 bytes += kobjsize(current->fs);
48781@@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
48782
48783 if (file) {
48784 pad_len_spaces(m, len);
48785- seq_path(m, &file->f_path, "");
48786+ seq_path(m, &file->f_path, "\n\\");
48787 } else if (mm) {
48788 if (vma->vm_start <= mm->start_stack &&
48789 vma->vm_end >= mm->start_stack) {
48790diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48791index d67908b..d13f6a6 100644
48792--- a/fs/quota/netlink.c
48793+++ b/fs/quota/netlink.c
48794@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48795 void quota_send_warning(short type, unsigned int id, dev_t dev,
48796 const char warntype)
48797 {
48798- static atomic_t seq;
48799+ static atomic_unchecked_t seq;
48800 struct sk_buff *skb;
48801 void *msg_head;
48802 int ret;
48803@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48804 "VFS: Not enough memory to send quota warning.\n");
48805 return;
48806 }
48807- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48808+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48809 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48810 if (!msg_head) {
48811 printk(KERN_ERR
48812diff --git a/fs/readdir.c b/fs/readdir.c
48813index 356f715..c918d38 100644
48814--- a/fs/readdir.c
48815+++ b/fs/readdir.c
48816@@ -17,6 +17,7 @@
48817 #include <linux/security.h>
48818 #include <linux/syscalls.h>
48819 #include <linux/unistd.h>
48820+#include <linux/namei.h>
48821
48822 #include <asm/uaccess.h>
48823
48824@@ -67,6 +68,7 @@ struct old_linux_dirent {
48825
48826 struct readdir_callback {
48827 struct old_linux_dirent __user * dirent;
48828+ struct file * file;
48829 int result;
48830 };
48831
48832@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
48833 buf->result = -EOVERFLOW;
48834 return -EOVERFLOW;
48835 }
48836+
48837+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48838+ return 0;
48839+
48840 buf->result++;
48841 dirent = buf->dirent;
48842 if (!access_ok(VERIFY_WRITE, dirent,
48843@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
48844
48845 buf.result = 0;
48846 buf.dirent = dirent;
48847+ buf.file = file;
48848
48849 error = vfs_readdir(file, fillonedir, &buf);
48850 if (buf.result)
48851@@ -142,6 +149,7 @@ struct linux_dirent {
48852 struct getdents_callback {
48853 struct linux_dirent __user * current_dir;
48854 struct linux_dirent __user * previous;
48855+ struct file * file;
48856 int count;
48857 int error;
48858 };
48859@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
48860 buf->error = -EOVERFLOW;
48861 return -EOVERFLOW;
48862 }
48863+
48864+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48865+ return 0;
48866+
48867 dirent = buf->previous;
48868 if (dirent) {
48869 if (__put_user(offset, &dirent->d_off))
48870@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
48871 buf.previous = NULL;
48872 buf.count = count;
48873 buf.error = 0;
48874+ buf.file = file;
48875
48876 error = vfs_readdir(file, filldir, &buf);
48877 if (error >= 0)
48878@@ -229,6 +242,7 @@ out:
48879 struct getdents_callback64 {
48880 struct linux_dirent64 __user * current_dir;
48881 struct linux_dirent64 __user * previous;
48882+ struct file *file;
48883 int count;
48884 int error;
48885 };
48886@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
48887 buf->error = -EINVAL; /* only used if we fail.. */
48888 if (reclen > buf->count)
48889 return -EINVAL;
48890+
48891+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48892+ return 0;
48893+
48894 dirent = buf->previous;
48895 if (dirent) {
48896 if (__put_user(offset, &dirent->d_off))
48897@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48898
48899 buf.current_dir = dirent;
48900 buf.previous = NULL;
48901+ buf.file = file;
48902 buf.count = count;
48903 buf.error = 0;
48904
48905@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48906 error = buf.error;
48907 lastdirent = buf.previous;
48908 if (lastdirent) {
48909- typeof(lastdirent->d_off) d_off = file->f_pos;
48910+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48911 if (__put_user(d_off, &lastdirent->d_off))
48912 error = -EFAULT;
48913 else
48914diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
48915index 60c0804..d814f98 100644
48916--- a/fs/reiserfs/do_balan.c
48917+++ b/fs/reiserfs/do_balan.c
48918@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
48919 return;
48920 }
48921
48922- atomic_inc(&(fs_generation(tb->tb_sb)));
48923+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
48924 do_balance_starts(tb);
48925
48926 /* balance leaf returns 0 except if combining L R and S into
48927diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
48928index 7a99811..a7c96c4 100644
48929--- a/fs/reiserfs/procfs.c
48930+++ b/fs/reiserfs/procfs.c
48931@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
48932 "SMALL_TAILS " : "NO_TAILS ",
48933 replay_only(sb) ? "REPLAY_ONLY " : "",
48934 convert_reiserfs(sb) ? "CONV " : "",
48935- atomic_read(&r->s_generation_counter),
48936+ atomic_read_unchecked(&r->s_generation_counter),
48937 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
48938 SF(s_do_balance), SF(s_unneeded_left_neighbor),
48939 SF(s_good_search_by_key_reada), SF(s_bmaps),
48940diff --git a/fs/select.c b/fs/select.c
48941index e782258..3b4b44c 100644
48942--- a/fs/select.c
48943+++ b/fs/select.c
48944@@ -20,6 +20,7 @@
48945 #include <linux/module.h>
48946 #include <linux/slab.h>
48947 #include <linux/poll.h>
48948+#include <linux/security.h>
48949 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
48950 #include <linux/file.h>
48951 #include <linux/fdtable.h>
48952@@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
48953 struct poll_list *walk = head;
48954 unsigned long todo = nfds;
48955
48956+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
48957 if (nfds > rlimit(RLIMIT_NOFILE))
48958 return -EINVAL;
48959
48960diff --git a/fs/seq_file.c b/fs/seq_file.c
48961index 4023d6b..ab46c6a 100644
48962--- a/fs/seq_file.c
48963+++ b/fs/seq_file.c
48964@@ -9,6 +9,7 @@
48965 #include <linux/module.h>
48966 #include <linux/seq_file.h>
48967 #include <linux/slab.h>
48968+#include <linux/sched.h>
48969
48970 #include <asm/uaccess.h>
48971 #include <asm/page.h>
48972@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
48973 memset(p, 0, sizeof(*p));
48974 mutex_init(&p->lock);
48975 p->op = op;
48976+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48977+ p->exec_id = current->exec_id;
48978+#endif
48979
48980 /*
48981 * Wrappers around seq_open(e.g. swaps_open) need to be
48982@@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
48983 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
48984 void *data)
48985 {
48986- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
48987+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
48988 int res = -ENOMEM;
48989
48990 if (op) {
48991diff --git a/fs/splice.c b/fs/splice.c
48992index 1ec0493..d6ab5c2 100644
48993--- a/fs/splice.c
48994+++ b/fs/splice.c
48995@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
48996 pipe_lock(pipe);
48997
48998 for (;;) {
48999- if (!pipe->readers) {
49000+ if (!atomic_read(&pipe->readers)) {
49001 send_sig(SIGPIPE, current, 0);
49002 if (!ret)
49003 ret = -EPIPE;
49004@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49005 do_wakeup = 0;
49006 }
49007
49008- pipe->waiting_writers++;
49009+ atomic_inc(&pipe->waiting_writers);
49010 pipe_wait(pipe);
49011- pipe->waiting_writers--;
49012+ atomic_dec(&pipe->waiting_writers);
49013 }
49014
49015 pipe_unlock(pipe);
49016@@ -559,7 +559,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
49017 old_fs = get_fs();
49018 set_fs(get_ds());
49019 /* The cast to a user pointer is valid due to the set_fs() */
49020- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
49021+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
49022 set_fs(old_fs);
49023
49024 return res;
49025@@ -574,7 +574,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
49026 old_fs = get_fs();
49027 set_fs(get_ds());
49028 /* The cast to a user pointer is valid due to the set_fs() */
49029- res = vfs_write(file, (const char __user *)buf, count, &pos);
49030+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
49031 set_fs(old_fs);
49032
49033 return res;
49034@@ -625,7 +625,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49035 goto err;
49036
49037 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49038- vec[i].iov_base = (void __user *) page_address(page);
49039+ vec[i].iov_base = (void __force_user *) page_address(page);
49040 vec[i].iov_len = this_len;
49041 spd.pages[i] = page;
49042 spd.nr_pages++;
49043@@ -845,10 +845,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
49044 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49045 {
49046 while (!pipe->nrbufs) {
49047- if (!pipe->writers)
49048+ if (!atomic_read(&pipe->writers))
49049 return 0;
49050
49051- if (!pipe->waiting_writers && sd->num_spliced)
49052+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49053 return 0;
49054
49055 if (sd->flags & SPLICE_F_NONBLOCK)
49056@@ -1181,7 +1181,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
49057 * out of the pipe right after the splice_to_pipe(). So set
49058 * PIPE_READERS appropriately.
49059 */
49060- pipe->readers = 1;
49061+ atomic_set(&pipe->readers, 1);
49062
49063 current->splice_pipe = pipe;
49064 }
49065@@ -1733,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49066 ret = -ERESTARTSYS;
49067 break;
49068 }
49069- if (!pipe->writers)
49070+ if (!atomic_read(&pipe->writers))
49071 break;
49072- if (!pipe->waiting_writers) {
49073+ if (!atomic_read(&pipe->waiting_writers)) {
49074 if (flags & SPLICE_F_NONBLOCK) {
49075 ret = -EAGAIN;
49076 break;
49077@@ -1767,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49078 pipe_lock(pipe);
49079
49080 while (pipe->nrbufs >= pipe->buffers) {
49081- if (!pipe->readers) {
49082+ if (!atomic_read(&pipe->readers)) {
49083 send_sig(SIGPIPE, current, 0);
49084 ret = -EPIPE;
49085 break;
49086@@ -1780,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49087 ret = -ERESTARTSYS;
49088 break;
49089 }
49090- pipe->waiting_writers++;
49091+ atomic_inc(&pipe->waiting_writers);
49092 pipe_wait(pipe);
49093- pipe->waiting_writers--;
49094+ atomic_dec(&pipe->waiting_writers);
49095 }
49096
49097 pipe_unlock(pipe);
49098@@ -1818,14 +1818,14 @@ retry:
49099 pipe_double_lock(ipipe, opipe);
49100
49101 do {
49102- if (!opipe->readers) {
49103+ if (!atomic_read(&opipe->readers)) {
49104 send_sig(SIGPIPE, current, 0);
49105 if (!ret)
49106 ret = -EPIPE;
49107 break;
49108 }
49109
49110- if (!ipipe->nrbufs && !ipipe->writers)
49111+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
49112 break;
49113
49114 /*
49115@@ -1922,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49116 pipe_double_lock(ipipe, opipe);
49117
49118 do {
49119- if (!opipe->readers) {
49120+ if (!atomic_read(&opipe->readers)) {
49121 send_sig(SIGPIPE, current, 0);
49122 if (!ret)
49123 ret = -EPIPE;
49124@@ -1967,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49125 * return EAGAIN if we have the potential of some data in the
49126 * future, otherwise just return 0
49127 */
49128- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
49129+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
49130 ret = -EAGAIN;
49131
49132 pipe_unlock(ipipe);
49133diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
49134index a475983..9c6a1f0 100644
49135--- a/fs/sysfs/bin.c
49136+++ b/fs/sysfs/bin.c
49137@@ -67,6 +67,8 @@ fill_read(struct file *file, char *buffer, loff_t off, size_t count)
49138 }
49139
49140 static ssize_t
49141+read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off) __size_overflow(3);
49142+static ssize_t
49143 read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
49144 {
49145 struct bin_buffer *bb = file->private_data;
49146diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
49147index 7fdf6a7..e6cd8ad 100644
49148--- a/fs/sysfs/dir.c
49149+++ b/fs/sysfs/dir.c
49150@@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
49151 struct sysfs_dirent *sd;
49152 int rc;
49153
49154+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49155+ const char *parent_name = parent_sd->s_name;
49156+
49157+ mode = S_IFDIR | S_IRWXU;
49158+
49159+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
49160+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
49161+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
49162+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
49163+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
49164+#endif
49165+
49166 /* allocate */
49167 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
49168 if (!sd)
49169diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
49170index 00012e3..8392349 100644
49171--- a/fs/sysfs/file.c
49172+++ b/fs/sysfs/file.c
49173@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
49174
49175 struct sysfs_open_dirent {
49176 atomic_t refcnt;
49177- atomic_t event;
49178+ atomic_unchecked_t event;
49179 wait_queue_head_t poll;
49180 struct list_head buffers; /* goes through sysfs_buffer.list */
49181 };
49182@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
49183 if (!sysfs_get_active(attr_sd))
49184 return -ENODEV;
49185
49186- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
49187+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
49188 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
49189
49190 sysfs_put_active(attr_sd);
49191@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
49192 return -ENOMEM;
49193
49194 atomic_set(&new_od->refcnt, 0);
49195- atomic_set(&new_od->event, 1);
49196+ atomic_set_unchecked(&new_od->event, 1);
49197 init_waitqueue_head(&new_od->poll);
49198 INIT_LIST_HEAD(&new_od->buffers);
49199 goto retry;
49200@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
49201
49202 sysfs_put_active(attr_sd);
49203
49204- if (buffer->event != atomic_read(&od->event))
49205+ if (buffer->event != atomic_read_unchecked(&od->event))
49206 goto trigger;
49207
49208 return DEFAULT_POLLMASK;
49209@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
49210
49211 od = sd->s_attr.open;
49212 if (od) {
49213- atomic_inc(&od->event);
49214+ atomic_inc_unchecked(&od->event);
49215 wake_up_interruptible(&od->poll);
49216 }
49217
49218diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
49219index a7ac78f..02158e1 100644
49220--- a/fs/sysfs/symlink.c
49221+++ b/fs/sysfs/symlink.c
49222@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
49223
49224 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
49225 {
49226- char *page = nd_get_link(nd);
49227+ const char *page = nd_get_link(nd);
49228 if (!IS_ERR(page))
49229 free_page((unsigned long)page);
49230 }
49231diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
49232index f922cba..062fb02 100644
49233--- a/fs/ubifs/debug.c
49234+++ b/fs/ubifs/debug.c
49235@@ -2819,6 +2819,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *u, size_t count,
49236 * debugfs file. Returns %0 or %1 in case of success and a negative error code
49237 * in case of failure.
49238 */
49239+static int interpret_user_input(const char __user *u, size_t count) __size_overflow(2);
49240 static int interpret_user_input(const char __user *u, size_t count)
49241 {
49242 size_t buf_size;
49243@@ -2837,6 +2838,8 @@ static int interpret_user_input(const char __user *u, size_t count)
49244 }
49245
49246 static ssize_t dfs_file_write(struct file *file, const char __user *u,
49247+ size_t count, loff_t *ppos) __size_overflow(3);
49248+static ssize_t dfs_file_write(struct file *file, const char __user *u,
49249 size_t count, loff_t *ppos)
49250 {
49251 struct ubifs_info *c = file->private_data;
49252diff --git a/fs/udf/misc.c b/fs/udf/misc.c
49253index c175b4d..8f36a16 100644
49254--- a/fs/udf/misc.c
49255+++ b/fs/udf/misc.c
49256@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
49257
49258 u8 udf_tag_checksum(const struct tag *t)
49259 {
49260- u8 *data = (u8 *)t;
49261+ const u8 *data = (const u8 *)t;
49262 u8 checksum = 0;
49263 int i;
49264 for (i = 0; i < sizeof(struct tag); ++i)
49265diff --git a/fs/utimes.c b/fs/utimes.c
49266index ba653f3..06ea4b1 100644
49267--- a/fs/utimes.c
49268+++ b/fs/utimes.c
49269@@ -1,6 +1,7 @@
49270 #include <linux/compiler.h>
49271 #include <linux/file.h>
49272 #include <linux/fs.h>
49273+#include <linux/security.h>
49274 #include <linux/linkage.h>
49275 #include <linux/mount.h>
49276 #include <linux/namei.h>
49277@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
49278 goto mnt_drop_write_and_out;
49279 }
49280 }
49281+
49282+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
49283+ error = -EACCES;
49284+ goto mnt_drop_write_and_out;
49285+ }
49286+
49287 mutex_lock(&inode->i_mutex);
49288 error = notify_change(path->dentry, &newattrs);
49289 mutex_unlock(&inode->i_mutex);
49290diff --git a/fs/xattr.c b/fs/xattr.c
49291index 82f4337..236473c 100644
49292--- a/fs/xattr.c
49293+++ b/fs/xattr.c
49294@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
49295 * Extended attribute SET operations
49296 */
49297 static long
49298-setxattr(struct dentry *d, const char __user *name, const void __user *value,
49299+setxattr(struct path *path, const char __user *name, const void __user *value,
49300 size_t size, int flags)
49301 {
49302 int error;
49303@@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
49304 return PTR_ERR(kvalue);
49305 }
49306
49307- error = vfs_setxattr(d, kname, kvalue, size, flags);
49308+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
49309+ error = -EACCES;
49310+ goto out;
49311+ }
49312+
49313+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
49314+out:
49315 kfree(kvalue);
49316 return error;
49317 }
49318@@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
49319 return error;
49320 error = mnt_want_write(path.mnt);
49321 if (!error) {
49322- error = setxattr(path.dentry, name, value, size, flags);
49323+ error = setxattr(&path, name, value, size, flags);
49324 mnt_drop_write(path.mnt);
49325 }
49326 path_put(&path);
49327@@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
49328 return error;
49329 error = mnt_want_write(path.mnt);
49330 if (!error) {
49331- error = setxattr(path.dentry, name, value, size, flags);
49332+ error = setxattr(&path, name, value, size, flags);
49333 mnt_drop_write(path.mnt);
49334 }
49335 path_put(&path);
49336@@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
49337 const void __user *,value, size_t, size, int, flags)
49338 {
49339 struct file *f;
49340- struct dentry *dentry;
49341 int error = -EBADF;
49342
49343 f = fget(fd);
49344 if (!f)
49345 return error;
49346- dentry = f->f_path.dentry;
49347- audit_inode(NULL, dentry);
49348+ audit_inode(NULL, f->f_path.dentry);
49349 error = mnt_want_write_file(f);
49350 if (!error) {
49351- error = setxattr(dentry, name, value, size, flags);
49352+ error = setxattr(&f->f_path, name, value, size, flags);
49353 mnt_drop_write_file(f);
49354 }
49355 fput(f);
49356diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
49357index 8d5a506..7f62712 100644
49358--- a/fs/xattr_acl.c
49359+++ b/fs/xattr_acl.c
49360@@ -17,8 +17,8 @@
49361 struct posix_acl *
49362 posix_acl_from_xattr(const void *value, size_t size)
49363 {
49364- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
49365- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
49366+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
49367+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
49368 int count;
49369 struct posix_acl *acl;
49370 struct posix_acl_entry *acl_e;
49371diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
49372index 188ef2f..adcf864 100644
49373--- a/fs/xfs/xfs_bmap.c
49374+++ b/fs/xfs/xfs_bmap.c
49375@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
49376 int nmap,
49377 int ret_nmap);
49378 #else
49379-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
49380+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
49381 #endif /* DEBUG */
49382
49383 STATIC int
49384diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
49385index 79d05e8..e3e5861 100644
49386--- a/fs/xfs/xfs_dir2_sf.c
49387+++ b/fs/xfs/xfs_dir2_sf.c
49388@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
49389 }
49390
49391 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
49392- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49393+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
49394+ char name[sfep->namelen];
49395+ memcpy(name, sfep->name, sfep->namelen);
49396+ if (filldir(dirent, name, sfep->namelen,
49397+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
49398+ *offset = off & 0x7fffffff;
49399+ return 0;
49400+ }
49401+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49402 off & 0x7fffffff, ino, DT_UNKNOWN)) {
49403 *offset = off & 0x7fffffff;
49404 return 0;
49405diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
49406index 76f3ca5..f57f712 100644
49407--- a/fs/xfs/xfs_ioctl.c
49408+++ b/fs/xfs/xfs_ioctl.c
49409@@ -128,7 +128,7 @@ xfs_find_handle(
49410 }
49411
49412 error = -EFAULT;
49413- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
49414+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
49415 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
49416 goto out_put;
49417
49418diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
49419index ab30253..4d86958 100644
49420--- a/fs/xfs/xfs_iops.c
49421+++ b/fs/xfs/xfs_iops.c
49422@@ -447,7 +447,7 @@ xfs_vn_put_link(
49423 struct nameidata *nd,
49424 void *p)
49425 {
49426- char *s = nd_get_link(nd);
49427+ const char *s = nd_get_link(nd);
49428
49429 if (!IS_ERR(s))
49430 kfree(s);
49431diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
49432new file mode 100644
49433index 0000000..4089e05
49434--- /dev/null
49435+++ b/grsecurity/Kconfig
49436@@ -0,0 +1,1078 @@
49437+#
49438+# grecurity configuration
49439+#
49440+
49441+menu "Grsecurity"
49442+
49443+config GRKERNSEC
49444+ bool "Grsecurity"
49445+ select CRYPTO
49446+ select CRYPTO_SHA256
49447+ help
49448+ If you say Y here, you will be able to configure many features
49449+ that will enhance the security of your system. It is highly
49450+ recommended that you say Y here and read through the help
49451+ for each option so that you fully understand the features and
49452+ can evaluate their usefulness for your machine.
49453+
49454+choice
49455+ prompt "Security Level"
49456+ depends on GRKERNSEC
49457+ default GRKERNSEC_CUSTOM
49458+
49459+config GRKERNSEC_LOW
49460+ bool "Low"
49461+ select GRKERNSEC_LINK
49462+ select GRKERNSEC_FIFO
49463+ select GRKERNSEC_RANDNET
49464+ select GRKERNSEC_DMESG
49465+ select GRKERNSEC_CHROOT
49466+ select GRKERNSEC_CHROOT_CHDIR
49467+
49468+ help
49469+ If you choose this option, several of the grsecurity options will
49470+ be enabled that will give you greater protection against a number
49471+ of attacks, while assuring that none of your software will have any
49472+ conflicts with the additional security measures. If you run a lot
49473+ of unusual software, or you are having problems with the higher
49474+ security levels, you should say Y here. With this option, the
49475+ following features are enabled:
49476+
49477+ - Linking restrictions
49478+ - FIFO restrictions
49479+ - Restricted dmesg
49480+ - Enforced chdir("/") on chroot
49481+ - Runtime module disabling
49482+
49483+config GRKERNSEC_MEDIUM
49484+ bool "Medium"
49485+ select PAX
49486+ select PAX_EI_PAX
49487+ select PAX_PT_PAX_FLAGS
49488+ select PAX_HAVE_ACL_FLAGS
49489+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49490+ select GRKERNSEC_CHROOT
49491+ select GRKERNSEC_CHROOT_SYSCTL
49492+ select GRKERNSEC_LINK
49493+ select GRKERNSEC_FIFO
49494+ select GRKERNSEC_DMESG
49495+ select GRKERNSEC_RANDNET
49496+ select GRKERNSEC_FORKFAIL
49497+ select GRKERNSEC_TIME
49498+ select GRKERNSEC_SIGNAL
49499+ select GRKERNSEC_CHROOT
49500+ select GRKERNSEC_CHROOT_UNIX
49501+ select GRKERNSEC_CHROOT_MOUNT
49502+ select GRKERNSEC_CHROOT_PIVOT
49503+ select GRKERNSEC_CHROOT_DOUBLE
49504+ select GRKERNSEC_CHROOT_CHDIR
49505+ select GRKERNSEC_CHROOT_MKNOD
49506+ select GRKERNSEC_PROC
49507+ select GRKERNSEC_PROC_USERGROUP
49508+ select PAX_RANDUSTACK
49509+ select PAX_ASLR
49510+ select PAX_RANDMMAP
49511+ select PAX_REFCOUNT if (X86 || SPARC64)
49512+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49513+
49514+ help
49515+ If you say Y here, several features in addition to those included
49516+ in the low additional security level will be enabled. These
49517+ features provide even more security to your system, though in rare
49518+ cases they may be incompatible with very old or poorly written
49519+ software. If you enable this option, make sure that your auth
49520+ service (identd) is running as gid 1001. With this option,
49521+ the following features (in addition to those provided in the
49522+ low additional security level) will be enabled:
49523+
49524+ - Failed fork logging
49525+ - Time change logging
49526+ - Signal logging
49527+ - Deny mounts in chroot
49528+ - Deny double chrooting
49529+ - Deny sysctl writes in chroot
49530+ - Deny mknod in chroot
49531+ - Deny access to abstract AF_UNIX sockets out of chroot
49532+ - Deny pivot_root in chroot
49533+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
49534+ - /proc restrictions with special GID set to 10 (usually wheel)
49535+ - Address Space Layout Randomization (ASLR)
49536+ - Prevent exploitation of most refcount overflows
49537+ - Bounds checking of copying between the kernel and userland
49538+
49539+config GRKERNSEC_HIGH
49540+ bool "High"
49541+ select GRKERNSEC_LINK
49542+ select GRKERNSEC_FIFO
49543+ select GRKERNSEC_DMESG
49544+ select GRKERNSEC_FORKFAIL
49545+ select GRKERNSEC_TIME
49546+ select GRKERNSEC_SIGNAL
49547+ select GRKERNSEC_CHROOT
49548+ select GRKERNSEC_CHROOT_SHMAT
49549+ select GRKERNSEC_CHROOT_UNIX
49550+ select GRKERNSEC_CHROOT_MOUNT
49551+ select GRKERNSEC_CHROOT_FCHDIR
49552+ select GRKERNSEC_CHROOT_PIVOT
49553+ select GRKERNSEC_CHROOT_DOUBLE
49554+ select GRKERNSEC_CHROOT_CHDIR
49555+ select GRKERNSEC_CHROOT_MKNOD
49556+ select GRKERNSEC_CHROOT_CAPS
49557+ select GRKERNSEC_CHROOT_SYSCTL
49558+ select GRKERNSEC_CHROOT_FINDTASK
49559+ select GRKERNSEC_SYSFS_RESTRICT
49560+ select GRKERNSEC_PROC
49561+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49562+ select GRKERNSEC_HIDESYM
49563+ select GRKERNSEC_BRUTE
49564+ select GRKERNSEC_PROC_USERGROUP
49565+ select GRKERNSEC_KMEM
49566+ select GRKERNSEC_RESLOG
49567+ select GRKERNSEC_RANDNET
49568+ select GRKERNSEC_PROC_ADD
49569+ select GRKERNSEC_CHROOT_CHMOD
49570+ select GRKERNSEC_CHROOT_NICE
49571+ select GRKERNSEC_SETXID
49572+ select GRKERNSEC_AUDIT_MOUNT
49573+ select GRKERNSEC_MODHARDEN if (MODULES)
49574+ select GRKERNSEC_HARDEN_PTRACE
49575+ select GRKERNSEC_PTRACE_READEXEC
49576+ select GRKERNSEC_VM86 if (X86_32)
49577+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
49578+ select PAX
49579+ select PAX_RANDUSTACK
49580+ select PAX_ASLR
49581+ select PAX_RANDMMAP
49582+ select PAX_NOEXEC
49583+ select PAX_MPROTECT
49584+ select PAX_EI_PAX
49585+ select PAX_PT_PAX_FLAGS
49586+ select PAX_HAVE_ACL_FLAGS
49587+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
49588+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
49589+ select PAX_RANDKSTACK if (X86_TSC && X86)
49590+ select PAX_SEGMEXEC if (X86_32)
49591+ select PAX_PAGEEXEC
49592+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
49593+ select PAX_EMUTRAMP if (PARISC)
49594+ select PAX_EMUSIGRT if (PARISC)
49595+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
49596+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
49597+ select PAX_REFCOUNT if (X86 || SPARC64)
49598+ select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
49599+ help
49600+ If you say Y here, many of the features of grsecurity will be
49601+ enabled, which will protect you against many kinds of attacks
49602+ against your system. The heightened security comes at a cost
49603+ of an increased chance of incompatibilities with rare software
49604+ on your machine. Since this security level enables PaX, you should
49605+ view <http://pax.grsecurity.net> and read about the PaX
49606+ project. While you are there, download chpax and run it on
49607+ binaries that cause problems with PaX. Also remember that
49608+ since the /proc restrictions are enabled, you must run your
49609+ identd as gid 1001. This security level enables the following
49610+ features in addition to those listed in the low and medium
49611+ security levels:
49612+
49613+ - Additional /proc restrictions
49614+ - Chmod restrictions in chroot
49615+ - No signals, ptrace, or viewing of processes outside of chroot
49616+ - Capability restrictions in chroot
49617+ - Deny fchdir out of chroot
49618+ - Priority restrictions in chroot
49619+ - Segmentation-based implementation of PaX
49620+ - Mprotect restrictions
49621+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
49622+ - Kernel stack randomization
49623+ - Mount/unmount/remount logging
49624+ - Kernel symbol hiding
49625+ - Hardening of module auto-loading
49626+ - Ptrace restrictions
49627+ - Restricted vm86 mode
49628+ - Restricted sysfs/debugfs
49629+ - Active kernel exploit response
49630+
49631+config GRKERNSEC_CUSTOM
49632+ bool "Custom"
49633+ help
49634+ If you say Y here, you will be able to configure every grsecurity
49635+ option, which allows you to enable many more features that aren't
49636+ covered in the basic security levels. These additional features
49637+ include TPE, socket restrictions, and the sysctl system for
49638+ grsecurity. It is advised that you read through the help for
49639+ each option to determine its usefulness in your situation.
49640+
49641+endchoice
49642+
49643+menu "Memory Protections"
49644+depends on GRKERNSEC
49645+
49646+config GRKERNSEC_KMEM
49647+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49648+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49649+ help
49650+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49651+ be written to or read from to modify or leak the contents of the running
49652+ kernel. /dev/port will also not be allowed to be opened. If you have module
49653+ support disabled, enabling this will close up four ways that are
49654+ currently used to insert malicious code into the running kernel.
49655+ Even with all these features enabled, we still highly recommend that
49656+ you use the RBAC system, as it is still possible for an attacker to
49657+ modify the running kernel through privileged I/O granted by ioperm/iopl.
49658+ If you are not using XFree86, you may be able to stop this additional
49659+ case by enabling the 'Disable privileged I/O' option. Though nothing
49660+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49661+ but only to video memory, which is the only writing we allow in this
49662+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
49663+ not be allowed to mprotect it with PROT_WRITE later.
49664+ It is highly recommended that you say Y here if you meet all the
49665+ conditions above.
49666+
49667+config GRKERNSEC_VM86
49668+ bool "Restrict VM86 mode"
49669+ depends on X86_32
49670+
49671+ help
49672+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49673+ make use of a special execution mode on 32bit x86 processors called
49674+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49675+ video cards and will still work with this option enabled. The purpose
49676+ of the option is to prevent exploitation of emulation errors in
49677+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
49678+ Nearly all users should be able to enable this option.
49679+
49680+config GRKERNSEC_IO
49681+ bool "Disable privileged I/O"
49682+ depends on X86
49683+ select RTC_CLASS
49684+ select RTC_INTF_DEV
49685+ select RTC_DRV_CMOS
49686+
49687+ help
49688+ If you say Y here, all ioperm and iopl calls will return an error.
49689+ Ioperm and iopl can be used to modify the running kernel.
49690+ Unfortunately, some programs need this access to operate properly,
49691+ the most notable of which are XFree86 and hwclock. hwclock can be
49692+ remedied by having RTC support in the kernel, so real-time
49693+ clock support is enabled if this option is enabled, to ensure
49694+ that hwclock operates correctly. XFree86 still will not
49695+ operate correctly with this option enabled, so DO NOT CHOOSE Y
49696+ IF YOU USE XFree86. If you use XFree86 and you still want to
49697+ protect your kernel against modification, use the RBAC system.
49698+
49699+config GRKERNSEC_PROC_MEMMAP
49700+ bool "Harden ASLR against information leaks and entropy reduction"
49701+ default y if (PAX_NOEXEC || PAX_ASLR)
49702+ depends on PAX_NOEXEC || PAX_ASLR
49703+ help
49704+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49705+ give no information about the addresses of its mappings if
49706+ PaX features that rely on random addresses are enabled on the task.
49707+ In addition to sanitizing this information and disabling other
49708+ dangerous sources of information, this option causes reads of sensitive
49709+ /proc/<pid> entries where the file descriptor was opened in a different
49710+ task than the one performing the read. Such attempts are logged.
49711+ This option also limits argv/env strings for suid/sgid binaries
49712+ to 512KB to prevent a complete exhaustion of the stack entropy provided
49713+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
49714+ binaries to prevent alternative mmap layouts from being abused.
49715+
49716+ If you use PaX it is essential that you say Y here as it closes up
49717+ several holes that make full ASLR useless locally.
49718+
49719+config GRKERNSEC_BRUTE
49720+ bool "Deter exploit bruteforcing"
49721+ help
49722+ If you say Y here, attempts to bruteforce exploits against forking
49723+ daemons such as apache or sshd, as well as against suid/sgid binaries
49724+ will be deterred. When a child of a forking daemon is killed by PaX
49725+ or crashes due to an illegal instruction or other suspicious signal,
49726+ the parent process will be delayed 30 seconds upon every subsequent
49727+ fork until the administrator is able to assess the situation and
49728+ restart the daemon.
49729+ In the suid/sgid case, the attempt is logged, the user has all their
49730+ processes terminated, and they are prevented from executing any further
49731+ processes for 15 minutes.
49732+ It is recommended that you also enable signal logging in the auditing
49733+ section so that logs are generated when a process triggers a suspicious
49734+ signal.
49735+ If the sysctl option is enabled, a sysctl option with name
49736+ "deter_bruteforce" is created.
49737+
49738+
49739+config GRKERNSEC_MODHARDEN
49740+ bool "Harden module auto-loading"
49741+ depends on MODULES
49742+ help
49743+ If you say Y here, module auto-loading in response to use of some
49744+ feature implemented by an unloaded module will be restricted to
49745+ root users. Enabling this option helps defend against attacks
49746+ by unprivileged users who abuse the auto-loading behavior to
49747+ cause a vulnerable module to load that is then exploited.
49748+
49749+ If this option prevents a legitimate use of auto-loading for a
49750+ non-root user, the administrator can execute modprobe manually
49751+ with the exact name of the module mentioned in the alert log.
49752+ Alternatively, the administrator can add the module to the list
49753+ of modules loaded at boot by modifying init scripts.
49754+
49755+ Modification of init scripts will most likely be needed on
49756+ Ubuntu servers with encrypted home directory support enabled,
49757+ as the first non-root user logging in will cause the ecb(aes),
49758+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49759+
49760+config GRKERNSEC_HIDESYM
49761+ bool "Hide kernel symbols"
49762+ help
49763+ If you say Y here, getting information on loaded modules, and
49764+ displaying all kernel symbols through a syscall will be restricted
49765+ to users with CAP_SYS_MODULE. For software compatibility reasons,
49766+ /proc/kallsyms will be restricted to the root user. The RBAC
49767+ system can hide that entry even from root.
49768+
49769+ This option also prevents leaking of kernel addresses through
49770+ several /proc entries.
49771+
49772+ Note that this option is only effective provided the following
49773+ conditions are met:
49774+ 1) The kernel using grsecurity is not precompiled by some distribution
49775+ 2) You have also enabled GRKERNSEC_DMESG
49776+ 3) You are using the RBAC system and hiding other files such as your
49777+ kernel image and System.map. Alternatively, enabling this option
49778+ causes the permissions on /boot, /lib/modules, and the kernel
49779+ source directory to change at compile time to prevent
49780+ reading by non-root users.
49781+ If the above conditions are met, this option will aid in providing a
49782+ useful protection against local kernel exploitation of overflows
49783+ and arbitrary read/write vulnerabilities.
49784+
49785+config GRKERNSEC_KERN_LOCKOUT
49786+ bool "Active kernel exploit response"
49787+ depends on X86 || ARM || PPC || SPARC
49788+ help
49789+ If you say Y here, when a PaX alert is triggered due to suspicious
49790+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49791+ or an OOPs occurs due to bad memory accesses, instead of just
49792+ terminating the offending process (and potentially allowing
49793+ a subsequent exploit from the same user), we will take one of two
49794+ actions:
49795+ If the user was root, we will panic the system
49796+ If the user was non-root, we will log the attempt, terminate
49797+ all processes owned by the user, then prevent them from creating
49798+ any new processes until the system is restarted
49799+ This deters repeated kernel exploitation/bruteforcing attempts
49800+ and is useful for later forensics.
49801+
49802+endmenu
49803+menu "Role Based Access Control Options"
49804+depends on GRKERNSEC
49805+
49806+config GRKERNSEC_RBAC_DEBUG
49807+ bool
49808+
49809+config GRKERNSEC_NO_RBAC
49810+ bool "Disable RBAC system"
49811+ help
49812+ If you say Y here, the /dev/grsec device will be removed from the kernel,
49813+ preventing the RBAC system from being enabled. You should only say Y
49814+ here if you have no intention of using the RBAC system, so as to prevent
49815+ an attacker with root access from misusing the RBAC system to hide files
49816+ and processes when loadable module support and /dev/[k]mem have been
49817+ locked down.
49818+
49819+config GRKERNSEC_ACL_HIDEKERN
49820+ bool "Hide kernel processes"
49821+ help
49822+ If you say Y here, all kernel threads will be hidden to all
49823+ processes but those whose subject has the "view hidden processes"
49824+ flag.
49825+
49826+config GRKERNSEC_ACL_MAXTRIES
49827+ int "Maximum tries before password lockout"
49828+ default 3
49829+ help
49830+ This option enforces the maximum number of times a user can attempt
49831+ to authorize themselves with the grsecurity RBAC system before being
49832+ denied the ability to attempt authorization again for a specified time.
49833+ The lower the number, the harder it will be to brute-force a password.
49834+
49835+config GRKERNSEC_ACL_TIMEOUT
49836+ int "Time to wait after max password tries, in seconds"
49837+ default 30
49838+ help
49839+ This option specifies the time the user must wait after attempting to
49840+ authorize to the RBAC system with the maximum number of invalid
49841+ passwords. The higher the number, the harder it will be to brute-force
49842+ a password.
49843+
49844+endmenu
49845+menu "Filesystem Protections"
49846+depends on GRKERNSEC
49847+
49848+config GRKERNSEC_PROC
49849+ bool "Proc restrictions"
49850+ help
49851+ If you say Y here, the permissions of the /proc filesystem
49852+ will be altered to enhance system security and privacy. You MUST
49853+ choose either a user only restriction or a user and group restriction.
49854+ Depending upon the option you choose, you can either restrict users to
49855+ see only the processes they themselves run, or choose a group that can
49856+ view all processes and files normally restricted to root if you choose
49857+ the "restrict to user only" option. NOTE: If you're running identd or
49858+ ntpd as a non-root user, you will have to run it as the group you
49859+ specify here.
49860+
49861+config GRKERNSEC_PROC_USER
49862+ bool "Restrict /proc to user only"
49863+ depends on GRKERNSEC_PROC
49864+ help
49865+ If you say Y here, non-root users will only be able to view their own
49866+ processes, and restricts them from viewing network-related information,
49867+ and viewing kernel symbol and module information.
49868+
49869+config GRKERNSEC_PROC_USERGROUP
49870+ bool "Allow special group"
49871+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49872+ help
49873+ If you say Y here, you will be able to select a group that will be
49874+ able to view all processes and network-related information. If you've
49875+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49876+ remain hidden. This option is useful if you want to run identd as
49877+ a non-root user.
49878+
49879+config GRKERNSEC_PROC_GID
49880+ int "GID for special group"
49881+ depends on GRKERNSEC_PROC_USERGROUP
49882+ default 1001
49883+
49884+config GRKERNSEC_PROC_ADD
49885+ bool "Additional restrictions"
49886+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49887+ help
49888+ If you say Y here, additional restrictions will be placed on
49889+ /proc that keep normal users from viewing device information and
49890+ slabinfo information that could be useful for exploits.
49891+
49892+config GRKERNSEC_LINK
49893+ bool "Linking restrictions"
49894+ help
49895+ If you say Y here, /tmp race exploits will be prevented, since users
49896+ will no longer be able to follow symlinks owned by other users in
49897+ world-writable +t directories (e.g. /tmp), unless the owner of the
49898+ symlink is the owner of the directory. Users will also not be
49899+ able to hardlink to files they do not own. If the sysctl option is
49900+ enabled, a sysctl option with name "linking_restrictions" is created.
49901+
49902+config GRKERNSEC_FIFO
49903+ bool "FIFO restrictions"
49904+ help
49905+ If you say Y here, users will not be able to write to FIFOs they don't
49906+ own in world-writable +t directories (e.g. /tmp), unless the owner of
49907+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
49908+ option is enabled, a sysctl option with name "fifo_restrictions" is
49909+ created.
49910+
49911+config GRKERNSEC_SYSFS_RESTRICT
49912+ bool "Sysfs/debugfs restriction"
49913+ depends on SYSFS
49914+ help
49915+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
49916+ any filesystem normally mounted under it (e.g. debugfs) will be
49917+ mostly accessible only by root. These filesystems generally provide access
49918+ to hardware and debug information that isn't appropriate for unprivileged
49919+ users of the system. Sysfs and debugfs have also become a large source
49920+ of new vulnerabilities, ranging from infoleaks to local compromise.
49921+ There has been very little oversight with an eye toward security involved
49922+ in adding new exporters of information to these filesystems, so their
49923+ use is discouraged.
49924+ For reasons of compatibility, a few directories have been whitelisted
49925+ for access by non-root users:
49926+ /sys/fs/selinux
49927+ /sys/fs/fuse
49928+ /sys/devices/system/cpu
49929+
49930+config GRKERNSEC_ROFS
49931+ bool "Runtime read-only mount protection"
49932+ help
49933+ If you say Y here, a sysctl option with name "romount_protect" will
49934+ be created. By setting this option to 1 at runtime, filesystems
49935+ will be protected in the following ways:
49936+ * No new writable mounts will be allowed
49937+ * Existing read-only mounts won't be able to be remounted read/write
49938+ * Write operations will be denied on all block devices
49939+ This option acts independently of grsec_lock: once it is set to 1,
49940+ it cannot be turned off. Therefore, please be mindful of the resulting
49941+ behavior if this option is enabled in an init script on a read-only
49942+ filesystem. This feature is mainly intended for secure embedded systems.
49943+
49944+config GRKERNSEC_CHROOT
49945+ bool "Chroot jail restrictions"
49946+ help
49947+ If you say Y here, you will be able to choose several options that will
49948+ make breaking out of a chrooted jail much more difficult. If you
49949+ encounter no software incompatibilities with the following options, it
49950+ is recommended that you enable each one.
49951+
49952+config GRKERNSEC_CHROOT_MOUNT
49953+ bool "Deny mounts"
49954+ depends on GRKERNSEC_CHROOT
49955+ help
49956+ If you say Y here, processes inside a chroot will not be able to
49957+ mount or remount filesystems. If the sysctl option is enabled, a
49958+ sysctl option with name "chroot_deny_mount" is created.
49959+
49960+config GRKERNSEC_CHROOT_DOUBLE
49961+ bool "Deny double-chroots"
49962+ depends on GRKERNSEC_CHROOT
49963+ help
49964+ If you say Y here, processes inside a chroot will not be able to chroot
49965+ again outside the chroot. This is a widely used method of breaking
49966+ out of a chroot jail and should not be allowed. If the sysctl
49967+ option is enabled, a sysctl option with name
49968+ "chroot_deny_chroot" is created.
49969+
49970+config GRKERNSEC_CHROOT_PIVOT
49971+ bool "Deny pivot_root in chroot"
49972+ depends on GRKERNSEC_CHROOT
49973+ help
49974+ If you say Y here, processes inside a chroot will not be able to use
49975+ a function called pivot_root() that was introduced in Linux 2.3.41. It
49976+ works similarly to chroot in that it changes the root filesystem. This
49977+ function could be misused in a chrooted process to attempt to break out
49978+ of the chroot, and therefore should not be allowed. If the sysctl
49979+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
49980+ created.
49981+
49982+config GRKERNSEC_CHROOT_CHDIR
49983+ bool "Enforce chdir(\"/\") on all chroots"
49984+ depends on GRKERNSEC_CHROOT
49985+ help
49986+ If you say Y here, the current working directory of all newly-chrooted
49987+ applications will be set to the root directory of the chroot.
49988+ The man page on chroot(2) states:
49989+ Note that this call does not change the current working
49990+ directory, so that `.' can be outside the tree rooted at
49991+ `/'. In particular, the super-user can escape from a
49992+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
49993+
49994+ It is recommended that you say Y here, since it's not known to break
49995+ any software. If the sysctl option is enabled, a sysctl option with
49996+ name "chroot_enforce_chdir" is created.
49997+
49998+config GRKERNSEC_CHROOT_CHMOD
49999+ bool "Deny (f)chmod +s"
50000+ depends on GRKERNSEC_CHROOT
50001+ help
50002+ If you say Y here, processes inside a chroot will not be able to chmod
50003+ or fchmod files to make them have suid or sgid bits. This protects
50004+ against another published method of breaking a chroot. If the sysctl
50005+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
50006+ created.
50007+
50008+config GRKERNSEC_CHROOT_FCHDIR
50009+ bool "Deny fchdir out of chroot"
50010+ depends on GRKERNSEC_CHROOT
50011+ help
50012+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
50013+ to a file descriptor of the chrooting process that points to a directory
50014+ outside the filesystem will be stopped. If the sysctl option
50015+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50016+
50017+config GRKERNSEC_CHROOT_MKNOD
50018+ bool "Deny mknod"
50019+ depends on GRKERNSEC_CHROOT
50020+ help
50021+ If you say Y here, processes inside a chroot will not be allowed to
50022+ mknod. The problem with using mknod inside a chroot is that it
50023+ would allow an attacker to create a device entry that is the same
50024+ as one on the physical root of your system, which could be
50025+ anything from the console device to a device for your hard drive (which
50026+ they could then use to wipe the drive or steal data). It is recommended
50027+ that you say Y here, unless you run into software incompatibilities.
50028+ If the sysctl option is enabled, a sysctl option with name
50029+ "chroot_deny_mknod" is created.
50030+
50031+config GRKERNSEC_CHROOT_SHMAT
50032+ bool "Deny shmat() out of chroot"
50033+ depends on GRKERNSEC_CHROOT
50034+ help
50035+ If you say Y here, processes inside a chroot will not be able to attach
50036+ to shared memory segments that were created outside of the chroot jail.
50037+ It is recommended that you say Y here. If the sysctl option is enabled,
50038+ a sysctl option with name "chroot_deny_shmat" is created.
50039+
50040+config GRKERNSEC_CHROOT_UNIX
50041+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
50042+ depends on GRKERNSEC_CHROOT
50043+ help
50044+ If you say Y here, processes inside a chroot will not be able to
50045+ connect to abstract (meaning not belonging to a filesystem) Unix
50046+ domain sockets that were bound outside of a chroot. It is recommended
50047+ that you say Y here. If the sysctl option is enabled, a sysctl option
50048+ with name "chroot_deny_unix" is created.
50049+
50050+config GRKERNSEC_CHROOT_FINDTASK
50051+ bool "Protect outside processes"
50052+ depends on GRKERNSEC_CHROOT
50053+ help
50054+ If you say Y here, processes inside a chroot will not be able to
50055+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50056+ getsid, or view any process outside of the chroot. If the sysctl
50057+ option is enabled, a sysctl option with name "chroot_findtask" is
50058+ created.
50059+
50060+config GRKERNSEC_CHROOT_NICE
50061+ bool "Restrict priority changes"
50062+ depends on GRKERNSEC_CHROOT
50063+ help
50064+ If you say Y here, processes inside a chroot will not be able to raise
50065+ the priority of processes in the chroot, or alter the priority of
50066+ processes outside the chroot. This provides more security than simply
50067+ removing CAP_SYS_NICE from the process' capability set. If the
50068+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50069+ is created.
50070+
50071+config GRKERNSEC_CHROOT_SYSCTL
50072+ bool "Deny sysctl writes"
50073+ depends on GRKERNSEC_CHROOT
50074+ help
50075+ If you say Y here, an attacker in a chroot will not be able to
50076+ write to sysctl entries, either by sysctl(2) or through a /proc
50077+ interface. It is strongly recommended that you say Y here. If the
50078+ sysctl option is enabled, a sysctl option with name
50079+ "chroot_deny_sysctl" is created.
50080+
50081+config GRKERNSEC_CHROOT_CAPS
50082+ bool "Capability restrictions"
50083+ depends on GRKERNSEC_CHROOT
50084+ help
50085+ If you say Y here, the capabilities on all processes within a
50086+ chroot jail will be lowered to stop module insertion, raw i/o,
50087+ system and net admin tasks, rebooting the system, modifying immutable
50088+ files, modifying IPC owned by another, and changing the system time.
50089+ This is left an option because it can break some apps. Disable this
50090+ if your chrooted apps are having problems performing those kinds of
50091+ tasks. If the sysctl option is enabled, a sysctl option with
50092+ name "chroot_caps" is created.
50093+
50094+endmenu
50095+menu "Kernel Auditing"
50096+depends on GRKERNSEC
50097+
50098+config GRKERNSEC_AUDIT_GROUP
50099+ bool "Single group for auditing"
50100+ help
50101+ If you say Y here, the exec, chdir, and (un)mount logging features
50102+ will only operate on a group you specify. This option is recommended
50103+ if you only want to watch certain users instead of having a large
50104+ amount of logs from the entire system. If the sysctl option is enabled,
50105+ a sysctl option with name "audit_group" is created.
50106+
50107+config GRKERNSEC_AUDIT_GID
50108+ int "GID for auditing"
50109+ depends on GRKERNSEC_AUDIT_GROUP
50110+ default 1007
50111+
50112+config GRKERNSEC_EXECLOG
50113+ bool "Exec logging"
50114+ help
50115+ If you say Y here, all execve() calls will be logged (since the
50116+ other exec*() calls are frontends to execve(), all execution
50117+ will be logged). Useful for shell-servers that like to keep track
50118+ of their users. If the sysctl option is enabled, a sysctl option with
50119+ name "exec_logging" is created.
50120+ WARNING: This option when enabled will produce a LOT of logs, especially
50121+ on an active system.
50122+
50123+config GRKERNSEC_RESLOG
50124+ bool "Resource logging"
50125+ help
50126+ If you say Y here, all attempts to overstep resource limits will
50127+ be logged with the resource name, the requested size, and the current
50128+ limit. It is highly recommended that you say Y here. If the sysctl
50129+ option is enabled, a sysctl option with name "resource_logging" is
50130+ created. If the RBAC system is enabled, the sysctl value is ignored.
50131+
50132+config GRKERNSEC_CHROOT_EXECLOG
50133+ bool "Log execs within chroot"
50134+ help
50135+ If you say Y here, all executions inside a chroot jail will be logged
50136+ to syslog. This can cause a large amount of logs if certain
50137+ applications (eg. djb's daemontools) are installed on the system, and
50138+ is therefore left as an option. If the sysctl option is enabled, a
50139+ sysctl option with name "chroot_execlog" is created.
50140+
50141+config GRKERNSEC_AUDIT_PTRACE
50142+ bool "Ptrace logging"
50143+ help
50144+ If you say Y here, all attempts to attach to a process via ptrace
50145+ will be logged. If the sysctl option is enabled, a sysctl option
50146+ with name "audit_ptrace" is created.
50147+
50148+config GRKERNSEC_AUDIT_CHDIR
50149+ bool "Chdir logging"
50150+ help
50151+ If you say Y here, all chdir() calls will be logged. If the sysctl
50152+ option is enabled, a sysctl option with name "audit_chdir" is created.
50153+
50154+config GRKERNSEC_AUDIT_MOUNT
50155+ bool "(Un)Mount logging"
50156+ help
50157+ If you say Y here, all mounts and unmounts will be logged. If the
50158+ sysctl option is enabled, a sysctl option with name "audit_mount" is
50159+ created.
50160+
50161+config GRKERNSEC_SIGNAL
50162+ bool "Signal logging"
50163+ help
50164+ If you say Y here, certain important signals will be logged, such as
50165+ SIGSEGV, which will as a result inform you of when an error in a program
50166+ occurred, which in some cases could mean a possible exploit attempt.
50167+ If the sysctl option is enabled, a sysctl option with name
50168+ "signal_logging" is created.
50169+
50170+config GRKERNSEC_FORKFAIL
50171+ bool "Fork failure logging"
50172+ help
50173+ If you say Y here, all failed fork() attempts will be logged.
50174+ This could suggest a fork bomb, or someone attempting to overstep
50175+ their process limit. If the sysctl option is enabled, a sysctl option
50176+ with name "forkfail_logging" is created.
50177+
50178+config GRKERNSEC_TIME
50179+ bool "Time change logging"
50180+ help
50181+ If you say Y here, any changes of the system clock will be logged.
50182+ If the sysctl option is enabled, a sysctl option with name
50183+ "timechange_logging" is created.
50184+
50185+config GRKERNSEC_PROC_IPADDR
50186+ bool "/proc/<pid>/ipaddr support"
50187+ help
50188+ If you say Y here, a new entry will be added to each /proc/<pid>
50189+ directory that contains the IP address of the person using the task.
50190+ The IP is carried across local TCP and AF_UNIX stream sockets.
50191+ This information can be useful for IDS/IPSes to perform remote response
50192+ to a local attack. The entry is readable by only the owner of the
50193+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50194+ the RBAC system), and thus does not create privacy concerns.
50195+
50196+config GRKERNSEC_RWXMAP_LOG
50197+ bool 'Denied RWX mmap/mprotect logging'
50198+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50199+ help
50200+ If you say Y here, calls to mmap() and mprotect() with explicit
50201+ usage of PROT_WRITE and PROT_EXEC together will be logged when
50202+ denied by the PAX_MPROTECT feature. If the sysctl option is
50203+ enabled, a sysctl option with name "rwxmap_logging" is created.
50204+
50205+config GRKERNSEC_AUDIT_TEXTREL
50206+ bool 'ELF text relocations logging (READ HELP)'
50207+ depends on PAX_MPROTECT
50208+ help
50209+ If you say Y here, text relocations will be logged with the filename
50210+ of the offending library or binary. The purpose of the feature is
50211+ to help Linux distribution developers get rid of libraries and
50212+ binaries that need text relocations which hinder the future progress
50213+ of PaX. Only Linux distribution developers should say Y here, and
50214+ never on a production machine, as this option creates an information
50215+ leak that could aid an attacker in defeating the randomization of
50216+ a single memory region. If the sysctl option is enabled, a sysctl
50217+ option with name "audit_textrel" is created.
50218+
50219+endmenu
50220+
50221+menu "Executable Protections"
50222+depends on GRKERNSEC
50223+
50224+config GRKERNSEC_DMESG
50225+ bool "Dmesg(8) restriction"
50226+ help
50227+ If you say Y here, non-root users will not be able to use dmesg(8)
50228+ to view up to the last 4kb of messages in the kernel's log buffer.
50229+ The kernel's log buffer often contains kernel addresses and other
50230+ identifying information useful to an attacker in fingerprinting a
50231+ system for a targeted exploit.
50232+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
50233+ created.
50234+
50235+config GRKERNSEC_HARDEN_PTRACE
50236+ bool "Deter ptrace-based process snooping"
50237+ help
50238+ If you say Y here, TTY sniffers and other malicious monitoring
50239+ programs implemented through ptrace will be defeated. If you
50240+ have been using the RBAC system, this option has already been
50241+ enabled for several years for all users, with the ability to make
50242+ fine-grained exceptions.
50243+
50244+ This option only affects the ability of non-root users to ptrace
50245+ processes that are not a descendent of the ptracing process.
50246+ This means that strace ./binary and gdb ./binary will still work,
50247+ but attaching to arbitrary processes will not. If the sysctl
50248+ option is enabled, a sysctl option with name "harden_ptrace" is
50249+ created.
50250+
50251+config GRKERNSEC_PTRACE_READEXEC
50252+ bool "Require read access to ptrace sensitive binaries"
50253+ help
50254+ If you say Y here, unprivileged users will not be able to ptrace unreadable
50255+ binaries. This option is useful in environments that
50256+ remove the read bits (e.g. file mode 4711) from suid binaries to
50257+ prevent infoleaking of their contents. This option adds
50258+ consistency to the use of that file mode, as the binary could normally
50259+ be read out when run without privileges while ptracing.
50260+
50261+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
50262+ is created.
50263+
50264+config GRKERNSEC_SETXID
50265+ bool "Enforce consistent multithreaded privileges"
50266+ help
50267+ If you say Y here, a change from a root uid to a non-root uid
50268+ in a multithreaded application will cause the resulting uids,
50269+ gids, supplementary groups, and capabilities in that thread
50270+ to be propagated to the other threads of the process. In most
50271+ cases this is unnecessary, as glibc will emulate this behavior
50272+ on behalf of the application. Other libcs do not act in the
50273+ same way, allowing the other threads of the process to continue
50274+ running with root privileges. If the sysctl option is enabled,
50275+ a sysctl option with name "consistent_setxid" is created.
50276+
50277+config GRKERNSEC_TPE
50278+ bool "Trusted Path Execution (TPE)"
50279+ help
50280+ If you say Y here, you will be able to choose a gid to add to the
50281+ supplementary groups of users you want to mark as "untrusted."
50282+ These users will not be able to execute any files that are not in
50283+ root-owned directories writable only by root. If the sysctl option
50284+ is enabled, a sysctl option with name "tpe" is created.
50285+
50286+config GRKERNSEC_TPE_ALL
50287+ bool "Partially restrict all non-root users"
50288+ depends on GRKERNSEC_TPE
50289+ help
50290+ If you say Y here, all non-root users will be covered under
50291+ a weaker TPE restriction. This is separate from, and in addition to,
50292+ the main TPE options that you have selected elsewhere. Thus, if a
50293+ "trusted" GID is chosen, this restriction applies to even that GID.
50294+ Under this restriction, all non-root users will only be allowed to
50295+ execute files in directories they own that are not group or
50296+ world-writable, or in directories owned by root and writable only by
50297+ root. If the sysctl option is enabled, a sysctl option with name
50298+ "tpe_restrict_all" is created.
50299+
50300+config GRKERNSEC_TPE_INVERT
50301+ bool "Invert GID option"
50302+ depends on GRKERNSEC_TPE
50303+ help
50304+ If you say Y here, the group you specify in the TPE configuration will
50305+ decide what group TPE restrictions will be *disabled* for. This
50306+ option is useful if you want TPE restrictions to be applied to most
50307+ users on the system. If the sysctl option is enabled, a sysctl option
50308+ with name "tpe_invert" is created. Unlike other sysctl options, this
50309+ entry will default to on for backward-compatibility.
50310+
50311+config GRKERNSEC_TPE_GID
50312+ int "GID for untrusted users"
50313+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50314+ default 1005
50315+ help
50316+ Setting this GID determines what group TPE restrictions will be
50317+ *enabled* for. If the sysctl option is enabled, a sysctl option
50318+ with name "tpe_gid" is created.
50319+
50320+config GRKERNSEC_TPE_GID
50321+ int "GID for trusted users"
50322+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50323+ default 1005
50324+ help
50325+ Setting this GID determines what group TPE restrictions will be
50326+ *disabled* for. If the sysctl option is enabled, a sysctl option
50327+ with name "tpe_gid" is created.
50328+
50329+endmenu
50330+menu "Network Protections"
50331+depends on GRKERNSEC
50332+
50333+config GRKERNSEC_RANDNET
50334+ bool "Larger entropy pools"
50335+ help
50336+ If you say Y here, the entropy pools used for many features of Linux
50337+ and grsecurity will be doubled in size. Since several grsecurity
50338+ features use additional randomness, it is recommended that you say Y
50339+ here. Saying Y here has a similar effect as modifying
50340+ /proc/sys/kernel/random/poolsize.
50341+
50342+config GRKERNSEC_BLACKHOLE
50343+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50344+ depends on NET
50345+ help
50346+ If you say Y here, neither TCP resets nor ICMP
50347+ destination-unreachable packets will be sent in response to packets
50348+ sent to ports for which no associated listening process exists.
50349+ This feature supports both IPV4 and IPV6 and exempts the
50350+ loopback interface from blackholing. Enabling this feature
50351+ makes a host more resilient to DoS attacks and reduces network
50352+ visibility against scanners.
50353+
50354+ The blackhole feature as-implemented is equivalent to the FreeBSD
50355+ blackhole feature, as it prevents RST responses to all packets, not
50356+ just SYNs. Under most application behavior this causes no
50357+ problems, but applications (like haproxy) may not close certain
50358+ connections in a way that cleanly terminates them on the remote
50359+ end, leaving the remote host in LAST_ACK state. Because of this
50360+ side-effect and to prevent intentional LAST_ACK DoSes, this
50361+ feature also adds automatic mitigation against such attacks.
50362+ The mitigation drastically reduces the amount of time a socket
50363+ can spend in LAST_ACK state. If you're using haproxy and not
50364+ all servers it connects to have this option enabled, consider
50365+ disabling this feature on the haproxy host.
50366+
50367+ If the sysctl option is enabled, two sysctl options with names
50368+ "ip_blackhole" and "lastack_retries" will be created.
50369+ While "ip_blackhole" takes the standard zero/non-zero on/off
50370+ toggle, "lastack_retries" uses the same kinds of values as
50371+ "tcp_retries1" and "tcp_retries2". The default value of 4
50372+ prevents a socket from lasting more than 45 seconds in LAST_ACK
50373+ state.
50374+
50375+config GRKERNSEC_SOCKET
50376+ bool "Socket restrictions"
50377+ depends on NET
50378+ help
50379+ If you say Y here, you will be able to choose from several options.
50380+ If you assign a GID on your system and add it to the supplementary
50381+ groups of users you want to restrict socket access to, this patch
50382+ will perform up to three things, based on the option(s) you choose.
50383+
50384+config GRKERNSEC_SOCKET_ALL
50385+ bool "Deny any sockets to group"
50386+ depends on GRKERNSEC_SOCKET
50387+ help
50388+ If you say Y here, you will be able to choose a GID whose users will
50389+ be unable to connect to other hosts from your machine or run server
50390+ applications from your machine. If the sysctl option is enabled, a
50391+ sysctl option with name "socket_all" is created.
50392+
50393+config GRKERNSEC_SOCKET_ALL_GID
50394+ int "GID to deny all sockets for"
50395+ depends on GRKERNSEC_SOCKET_ALL
50396+ default 1004
50397+ help
50398+ Here you can choose the GID to disable socket access for. Remember to
50399+ add the users you want socket access disabled for to the GID
50400+ specified here. If the sysctl option is enabled, a sysctl option
50401+ with name "socket_all_gid" is created.
50402+
50403+config GRKERNSEC_SOCKET_CLIENT
50404+ bool "Deny client sockets to group"
50405+ depends on GRKERNSEC_SOCKET
50406+ help
50407+ If you say Y here, you will be able to choose a GID whose users will
50408+ be unable to connect to other hosts from your machine, but will be
50409+ able to run servers. If this option is enabled, all users in the group
50410+ you specify will have to use passive mode when initiating ftp transfers
50411+ from the shell on your machine. If the sysctl option is enabled, a
50412+ sysctl option with name "socket_client" is created.
50413+
50414+config GRKERNSEC_SOCKET_CLIENT_GID
50415+ int "GID to deny client sockets for"
50416+ depends on GRKERNSEC_SOCKET_CLIENT
50417+ default 1003
50418+ help
50419+ Here you can choose the GID to disable client socket access for.
50420+ Remember to add the users you want client socket access disabled for to
50421+ the GID specified here. If the sysctl option is enabled, a sysctl
50422+ option with name "socket_client_gid" is created.
50423+
50424+config GRKERNSEC_SOCKET_SERVER
50425+ bool "Deny server sockets to group"
50426+ depends on GRKERNSEC_SOCKET
50427+ help
50428+ If you say Y here, you will be able to choose a GID of whose users will
50429+ be unable to run server applications from your machine. If the sysctl
50430+ option is enabled, a sysctl option with name "socket_server" is created.
50431+
50432+config GRKERNSEC_SOCKET_SERVER_GID
50433+ int "GID to deny server sockets for"
50434+ depends on GRKERNSEC_SOCKET_SERVER
50435+ default 1002
50436+ help
50437+ Here you can choose the GID to disable server socket access for.
50438+ Remember to add the users you want server socket access disabled for to
50439+ the GID specified here. If the sysctl option is enabled, a sysctl
50440+ option with name "socket_server_gid" is created.
50441+
50442+endmenu
50443+menu "Sysctl support"
50444+depends on GRKERNSEC && SYSCTL
50445+
50446+config GRKERNSEC_SYSCTL
50447+ bool "Sysctl support"
50448+ help
50449+ If you say Y here, you will be able to change the options that
50450+ grsecurity runs with at bootup, without having to recompile your
50451+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50452+ to enable (1) or disable (0) various features. All the sysctl entries
50453+ are mutable until the "grsec_lock" entry is set to a non-zero value.
50454+ All features enabled in the kernel configuration are disabled at boot
50455+ if you do not say Y to the "Turn on features by default" option.
50456+ All options should be set at startup, and the grsec_lock entry should
50457+ be set to a non-zero value after all the options are set.
50458+ *THIS IS EXTREMELY IMPORTANT*
50459+
50460+config GRKERNSEC_SYSCTL_DISTRO
50461+ bool "Extra sysctl support for distro makers (READ HELP)"
50462+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50463+ help
50464+ If you say Y here, additional sysctl options will be created
50465+ for features that affect processes running as root. Therefore,
50466+ it is critical when using this option that the grsec_lock entry be
50467+ enabled after boot. Only distros with prebuilt kernel packages
50468+ with this option enabled that can ensure grsec_lock is enabled
50469+ after boot should use this option.
50470+ *Failure to set grsec_lock after boot makes all grsec features
50471+ this option covers useless*
50472+
50473+ Currently this option creates the following sysctl entries:
50474+ "Disable Privileged I/O": "disable_priv_io"
50475+
50476+config GRKERNSEC_SYSCTL_ON
50477+ bool "Turn on features by default"
50478+ depends on GRKERNSEC_SYSCTL
50479+ help
50480+ If you say Y here, instead of having all features enabled in the
50481+ kernel configuration disabled at boot time, the features will be
50482+ enabled at boot time. It is recommended you say Y here unless
50483+ there is some reason you would want all sysctl-tunable features to
50484+ be disabled by default. As mentioned elsewhere, it is important
50485+ to enable the grsec_lock entry once you have finished modifying
50486+ the sysctl entries.
50487+
50488+endmenu
50489+menu "Logging Options"
50490+depends on GRKERNSEC
50491+
50492+config GRKERNSEC_FLOODTIME
50493+ int "Seconds in between log messages (minimum)"
50494+ default 10
50495+ help
50496+ This option allows you to enforce the number of seconds between
50497+ grsecurity log messages. The default should be suitable for most
50498+ people, however, if you choose to change it, choose a value small enough
50499+ to allow informative logs to be produced, but large enough to
50500+ prevent flooding.
50501+
50502+config GRKERNSEC_FLOODBURST
50503+ int "Number of messages in a burst (maximum)"
50504+ default 6
50505+ help
50506+ This option allows you to choose the maximum number of messages allowed
50507+ within the flood time interval you chose in a separate option. The
50508+ default should be suitable for most people, however if you find that
50509+ many of your logs are being interpreted as flooding, you may want to
50510+ raise this value.
50511+
50512+endmenu
50513+
50514+endmenu
50515diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50516new file mode 100644
50517index 0000000..1b9afa9
50518--- /dev/null
50519+++ b/grsecurity/Makefile
50520@@ -0,0 +1,38 @@
50521+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50522+# during 2001-2009 it has been completely redesigned by Brad Spengler
50523+# into an RBAC system
50524+#
50525+# All code in this directory and various hooks inserted throughout the kernel
50526+# are copyright Brad Spengler - Open Source Security, Inc., and released
50527+# under the GPL v2 or higher
50528+
50529+KBUILD_CFLAGS += -Werror
50530+
50531+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50532+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
50533+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50534+
50535+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50536+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50537+ gracl_learn.o grsec_log.o
50538+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50539+
50540+ifdef CONFIG_NET
50541+obj-y += grsec_sock.o
50542+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50543+endif
50544+
50545+ifndef CONFIG_GRKERNSEC
50546+obj-y += grsec_disabled.o
50547+endif
50548+
50549+ifdef CONFIG_GRKERNSEC_HIDESYM
50550+extra-y := grsec_hidesym.o
50551+$(obj)/grsec_hidesym.o:
50552+ @-chmod -f 500 /boot
50553+ @-chmod -f 500 /lib/modules
50554+ @-chmod -f 500 /lib64/modules
50555+ @-chmod -f 500 /lib32/modules
50556+ @-chmod -f 700 .
50557+ @echo ' grsec: protected kernel image paths'
50558+endif
50559diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50560new file mode 100644
50561index 0000000..42813ac
50562--- /dev/null
50563+++ b/grsecurity/gracl.c
50564@@ -0,0 +1,4192 @@
50565+#include <linux/kernel.h>
50566+#include <linux/module.h>
50567+#include <linux/sched.h>
50568+#include <linux/mm.h>
50569+#include <linux/file.h>
50570+#include <linux/fs.h>
50571+#include <linux/namei.h>
50572+#include <linux/mount.h>
50573+#include <linux/tty.h>
50574+#include <linux/proc_fs.h>
50575+#include <linux/lglock.h>
50576+#include <linux/slab.h>
50577+#include <linux/vmalloc.h>
50578+#include <linux/types.h>
50579+#include <linux/sysctl.h>
50580+#include <linux/netdevice.h>
50581+#include <linux/ptrace.h>
50582+#include <linux/gracl.h>
50583+#include <linux/gralloc.h>
50584+#include <linux/security.h>
50585+#include <linux/grinternal.h>
50586+#include <linux/pid_namespace.h>
50587+#include <linux/fdtable.h>
50588+#include <linux/percpu.h>
50589+#include "../fs/mount.h"
50590+
50591+#include <asm/uaccess.h>
50592+#include <asm/errno.h>
50593+#include <asm/mman.h>
50594+
50595+static struct acl_role_db acl_role_set;
50596+static struct name_db name_set;
50597+static struct inodev_db inodev_set;
50598+
50599+/* for keeping track of userspace pointers used for subjects, so we
50600+ can share references in the kernel as well
50601+*/
50602+
50603+static struct path real_root;
50604+
50605+static struct acl_subj_map_db subj_map_set;
50606+
50607+static struct acl_role_label *default_role;
50608+
50609+static struct acl_role_label *role_list;
50610+
50611+static u16 acl_sp_role_value;
50612+
50613+extern char *gr_shared_page[4];
50614+static DEFINE_MUTEX(gr_dev_mutex);
50615+DEFINE_RWLOCK(gr_inode_lock);
50616+
50617+struct gr_arg *gr_usermode;
50618+
50619+static unsigned int gr_status __read_only = GR_STATUS_INIT;
50620+
50621+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50622+extern void gr_clear_learn_entries(void);
50623+
50624+#ifdef CONFIG_GRKERNSEC_RESLOG
50625+extern void gr_log_resource(const struct task_struct *task,
50626+ const int res, const unsigned long wanted, const int gt);
50627+#endif
50628+
50629+unsigned char *gr_system_salt;
50630+unsigned char *gr_system_sum;
50631+
50632+static struct sprole_pw **acl_special_roles = NULL;
50633+static __u16 num_sprole_pws = 0;
50634+
50635+static struct acl_role_label *kernel_role = NULL;
50636+
50637+static unsigned int gr_auth_attempts = 0;
50638+static unsigned long gr_auth_expires = 0UL;
50639+
50640+#ifdef CONFIG_NET
50641+extern struct vfsmount *sock_mnt;
50642+#endif
50643+
50644+extern struct vfsmount *pipe_mnt;
50645+extern struct vfsmount *shm_mnt;
50646+#ifdef CONFIG_HUGETLBFS
50647+extern struct vfsmount *hugetlbfs_vfsmount;
50648+#endif
50649+
50650+static struct acl_object_label *fakefs_obj_rw;
50651+static struct acl_object_label *fakefs_obj_rwx;
50652+
50653+extern int gr_init_uidset(void);
50654+extern void gr_free_uidset(void);
50655+extern void gr_remove_uid(uid_t uid);
50656+extern int gr_find_uid(uid_t uid);
50657+
50658+DECLARE_BRLOCK(vfsmount_lock);
50659+
50660+__inline__ int
50661+gr_acl_is_enabled(void)
50662+{
50663+ return (gr_status & GR_READY);
50664+}
50665+
50666+#ifdef CONFIG_BTRFS_FS
50667+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50668+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50669+#endif
50670+
50671+static inline dev_t __get_dev(const struct dentry *dentry)
50672+{
50673+#ifdef CONFIG_BTRFS_FS
50674+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50675+ return get_btrfs_dev_from_inode(dentry->d_inode);
50676+ else
50677+#endif
50678+ return dentry->d_inode->i_sb->s_dev;
50679+}
50680+
50681+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50682+{
50683+ return __get_dev(dentry);
50684+}
50685+
50686+static char gr_task_roletype_to_char(struct task_struct *task)
50687+{
50688+ switch (task->role->roletype &
50689+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50690+ GR_ROLE_SPECIAL)) {
50691+ case GR_ROLE_DEFAULT:
50692+ return 'D';
50693+ case GR_ROLE_USER:
50694+ return 'U';
50695+ case GR_ROLE_GROUP:
50696+ return 'G';
50697+ case GR_ROLE_SPECIAL:
50698+ return 'S';
50699+ }
50700+
50701+ return 'X';
50702+}
50703+
50704+char gr_roletype_to_char(void)
50705+{
50706+ return gr_task_roletype_to_char(current);
50707+}
50708+
50709+__inline__ int
50710+gr_acl_tpe_check(void)
50711+{
50712+ if (unlikely(!(gr_status & GR_READY)))
50713+ return 0;
50714+ if (current->role->roletype & GR_ROLE_TPE)
50715+ return 1;
50716+ else
50717+ return 0;
50718+}
50719+
50720+int
50721+gr_handle_rawio(const struct inode *inode)
50722+{
50723+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50724+ if (inode && S_ISBLK(inode->i_mode) &&
50725+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50726+ !capable(CAP_SYS_RAWIO))
50727+ return 1;
50728+#endif
50729+ return 0;
50730+}
50731+
50732+static int
50733+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50734+{
50735+ if (likely(lena != lenb))
50736+ return 0;
50737+
50738+ return !memcmp(a, b, lena);
50739+}
50740+
50741+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50742+{
50743+ *buflen -= namelen;
50744+ if (*buflen < 0)
50745+ return -ENAMETOOLONG;
50746+ *buffer -= namelen;
50747+ memcpy(*buffer, str, namelen);
50748+ return 0;
50749+}
50750+
50751+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50752+{
50753+ return prepend(buffer, buflen, name->name, name->len);
50754+}
50755+
50756+static int prepend_path(const struct path *path, struct path *root,
50757+ char **buffer, int *buflen)
50758+{
50759+ struct dentry *dentry = path->dentry;
50760+ struct vfsmount *vfsmnt = path->mnt;
50761+ struct mount *mnt = real_mount(vfsmnt);
50762+ bool slash = false;
50763+ int error = 0;
50764+
50765+ while (dentry != root->dentry || vfsmnt != root->mnt) {
50766+ struct dentry * parent;
50767+
50768+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50769+ /* Global root? */
50770+ if (!mnt_has_parent(mnt)) {
50771+ goto out;
50772+ }
50773+ dentry = mnt->mnt_mountpoint;
50774+ mnt = mnt->mnt_parent;
50775+ vfsmnt = &mnt->mnt;
50776+ continue;
50777+ }
50778+ parent = dentry->d_parent;
50779+ prefetch(parent);
50780+ spin_lock(&dentry->d_lock);
50781+ error = prepend_name(buffer, buflen, &dentry->d_name);
50782+ spin_unlock(&dentry->d_lock);
50783+ if (!error)
50784+ error = prepend(buffer, buflen, "/", 1);
50785+ if (error)
50786+ break;
50787+
50788+ slash = true;
50789+ dentry = parent;
50790+ }
50791+
50792+out:
50793+ if (!error && !slash)
50794+ error = prepend(buffer, buflen, "/", 1);
50795+
50796+ return error;
50797+}
50798+
50799+/* this must be called with vfsmount_lock and rename_lock held */
50800+
50801+static char *__our_d_path(const struct path *path, struct path *root,
50802+ char *buf, int buflen)
50803+{
50804+ char *res = buf + buflen;
50805+ int error;
50806+
50807+ prepend(&res, &buflen, "\0", 1);
50808+ error = prepend_path(path, root, &res, &buflen);
50809+ if (error)
50810+ return ERR_PTR(error);
50811+
50812+ return res;
50813+}
50814+
50815+static char *
50816+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50817+{
50818+ char *retval;
50819+
50820+ retval = __our_d_path(path, root, buf, buflen);
50821+ if (unlikely(IS_ERR(retval)))
50822+ retval = strcpy(buf, "<path too long>");
50823+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50824+ retval[1] = '\0';
50825+
50826+ return retval;
50827+}
50828+
50829+static char *
50830+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50831+ char *buf, int buflen)
50832+{
50833+ struct path path;
50834+ char *res;
50835+
50836+ path.dentry = (struct dentry *)dentry;
50837+ path.mnt = (struct vfsmount *)vfsmnt;
50838+
50839+ /* we can use real_root.dentry, real_root.mnt, because this is only called
50840+ by the RBAC system */
50841+ res = gen_full_path(&path, &real_root, buf, buflen);
50842+
50843+ return res;
50844+}
50845+
50846+static char *
50847+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50848+ char *buf, int buflen)
50849+{
50850+ char *res;
50851+ struct path path;
50852+ struct path root;
50853+ struct task_struct *reaper = &init_task;
50854+
50855+ path.dentry = (struct dentry *)dentry;
50856+ path.mnt = (struct vfsmount *)vfsmnt;
50857+
50858+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50859+ get_fs_root(reaper->fs, &root);
50860+
50861+ write_seqlock(&rename_lock);
50862+ br_read_lock(vfsmount_lock);
50863+ res = gen_full_path(&path, &root, buf, buflen);
50864+ br_read_unlock(vfsmount_lock);
50865+ write_sequnlock(&rename_lock);
50866+
50867+ path_put(&root);
50868+ return res;
50869+}
50870+
50871+static char *
50872+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50873+{
50874+ char *ret;
50875+ write_seqlock(&rename_lock);
50876+ br_read_lock(vfsmount_lock);
50877+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50878+ PAGE_SIZE);
50879+ br_read_unlock(vfsmount_lock);
50880+ write_sequnlock(&rename_lock);
50881+ return ret;
50882+}
50883+
50884+static char *
50885+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50886+{
50887+ char *ret;
50888+ char *buf;
50889+ int buflen;
50890+
50891+ write_seqlock(&rename_lock);
50892+ br_read_lock(vfsmount_lock);
50893+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50894+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50895+ buflen = (int)(ret - buf);
50896+ if (buflen >= 5)
50897+ prepend(&ret, &buflen, "/proc", 5);
50898+ else
50899+ ret = strcpy(buf, "<path too long>");
50900+ br_read_unlock(vfsmount_lock);
50901+ write_sequnlock(&rename_lock);
50902+ return ret;
50903+}
50904+
50905+char *
50906+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50907+{
50908+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50909+ PAGE_SIZE);
50910+}
50911+
50912+char *
50913+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50914+{
50915+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50916+ PAGE_SIZE);
50917+}
50918+
50919+char *
50920+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50921+{
50922+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50923+ PAGE_SIZE);
50924+}
50925+
50926+char *
50927+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50928+{
50929+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
50930+ PAGE_SIZE);
50931+}
50932+
50933+char *
50934+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
50935+{
50936+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
50937+ PAGE_SIZE);
50938+}
50939+
50940+__inline__ __u32
50941+to_gr_audit(const __u32 reqmode)
50942+{
50943+ /* masks off auditable permission flags, then shifts them to create
50944+ auditing flags, and adds the special case of append auditing if
50945+ we're requesting write */
50946+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
50947+}
50948+
50949+struct acl_subject_label *
50950+lookup_subject_map(const struct acl_subject_label *userp)
50951+{
50952+ unsigned int index = shash(userp, subj_map_set.s_size);
50953+ struct subject_map *match;
50954+
50955+ match = subj_map_set.s_hash[index];
50956+
50957+ while (match && match->user != userp)
50958+ match = match->next;
50959+
50960+ if (match != NULL)
50961+ return match->kernel;
50962+ else
50963+ return NULL;
50964+}
50965+
50966+static void
50967+insert_subj_map_entry(struct subject_map *subjmap)
50968+{
50969+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
50970+ struct subject_map **curr;
50971+
50972+ subjmap->prev = NULL;
50973+
50974+ curr = &subj_map_set.s_hash[index];
50975+ if (*curr != NULL)
50976+ (*curr)->prev = subjmap;
50977+
50978+ subjmap->next = *curr;
50979+ *curr = subjmap;
50980+
50981+ return;
50982+}
50983+
50984+static struct acl_role_label *
50985+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
50986+ const gid_t gid)
50987+{
50988+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
50989+ struct acl_role_label *match;
50990+ struct role_allowed_ip *ipp;
50991+ unsigned int x;
50992+ u32 curr_ip = task->signal->curr_ip;
50993+
50994+ task->signal->saved_ip = curr_ip;
50995+
50996+ match = acl_role_set.r_hash[index];
50997+
50998+ while (match) {
50999+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
51000+ for (x = 0; x < match->domain_child_num; x++) {
51001+ if (match->domain_children[x] == uid)
51002+ goto found;
51003+ }
51004+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51005+ break;
51006+ match = match->next;
51007+ }
51008+found:
51009+ if (match == NULL) {
51010+ try_group:
51011+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51012+ match = acl_role_set.r_hash[index];
51013+
51014+ while (match) {
51015+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51016+ for (x = 0; x < match->domain_child_num; x++) {
51017+ if (match->domain_children[x] == gid)
51018+ goto found2;
51019+ }
51020+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51021+ break;
51022+ match = match->next;
51023+ }
51024+found2:
51025+ if (match == NULL)
51026+ match = default_role;
51027+ if (match->allowed_ips == NULL)
51028+ return match;
51029+ else {
51030+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51031+ if (likely
51032+ ((ntohl(curr_ip) & ipp->netmask) ==
51033+ (ntohl(ipp->addr) & ipp->netmask)))
51034+ return match;
51035+ }
51036+ match = default_role;
51037+ }
51038+ } else if (match->allowed_ips == NULL) {
51039+ return match;
51040+ } else {
51041+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51042+ if (likely
51043+ ((ntohl(curr_ip) & ipp->netmask) ==
51044+ (ntohl(ipp->addr) & ipp->netmask)))
51045+ return match;
51046+ }
51047+ goto try_group;
51048+ }
51049+
51050+ return match;
51051+}
51052+
51053+struct acl_subject_label *
51054+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51055+ const struct acl_role_label *role)
51056+{
51057+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51058+ struct acl_subject_label *match;
51059+
51060+ match = role->subj_hash[index];
51061+
51062+ while (match && (match->inode != ino || match->device != dev ||
51063+ (match->mode & GR_DELETED))) {
51064+ match = match->next;
51065+ }
51066+
51067+ if (match && !(match->mode & GR_DELETED))
51068+ return match;
51069+ else
51070+ return NULL;
51071+}
51072+
51073+struct acl_subject_label *
51074+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51075+ const struct acl_role_label *role)
51076+{
51077+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51078+ struct acl_subject_label *match;
51079+
51080+ match = role->subj_hash[index];
51081+
51082+ while (match && (match->inode != ino || match->device != dev ||
51083+ !(match->mode & GR_DELETED))) {
51084+ match = match->next;
51085+ }
51086+
51087+ if (match && (match->mode & GR_DELETED))
51088+ return match;
51089+ else
51090+ return NULL;
51091+}
51092+
51093+static struct acl_object_label *
51094+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51095+ const struct acl_subject_label *subj)
51096+{
51097+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51098+ struct acl_object_label *match;
51099+
51100+ match = subj->obj_hash[index];
51101+
51102+ while (match && (match->inode != ino || match->device != dev ||
51103+ (match->mode & GR_DELETED))) {
51104+ match = match->next;
51105+ }
51106+
51107+ if (match && !(match->mode & GR_DELETED))
51108+ return match;
51109+ else
51110+ return NULL;
51111+}
51112+
51113+static struct acl_object_label *
51114+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51115+ const struct acl_subject_label *subj)
51116+{
51117+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51118+ struct acl_object_label *match;
51119+
51120+ match = subj->obj_hash[index];
51121+
51122+ while (match && (match->inode != ino || match->device != dev ||
51123+ !(match->mode & GR_DELETED))) {
51124+ match = match->next;
51125+ }
51126+
51127+ if (match && (match->mode & GR_DELETED))
51128+ return match;
51129+
51130+ match = subj->obj_hash[index];
51131+
51132+ while (match && (match->inode != ino || match->device != dev ||
51133+ (match->mode & GR_DELETED))) {
51134+ match = match->next;
51135+ }
51136+
51137+ if (match && !(match->mode & GR_DELETED))
51138+ return match;
51139+ else
51140+ return NULL;
51141+}
51142+
51143+static struct name_entry *
51144+lookup_name_entry(const char *name)
51145+{
51146+ unsigned int len = strlen(name);
51147+ unsigned int key = full_name_hash(name, len);
51148+ unsigned int index = key % name_set.n_size;
51149+ struct name_entry *match;
51150+
51151+ match = name_set.n_hash[index];
51152+
51153+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51154+ match = match->next;
51155+
51156+ return match;
51157+}
51158+
51159+static struct name_entry *
51160+lookup_name_entry_create(const char *name)
51161+{
51162+ unsigned int len = strlen(name);
51163+ unsigned int key = full_name_hash(name, len);
51164+ unsigned int index = key % name_set.n_size;
51165+ struct name_entry *match;
51166+
51167+ match = name_set.n_hash[index];
51168+
51169+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51170+ !match->deleted))
51171+ match = match->next;
51172+
51173+ if (match && match->deleted)
51174+ return match;
51175+
51176+ match = name_set.n_hash[index];
51177+
51178+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51179+ match->deleted))
51180+ match = match->next;
51181+
51182+ if (match && !match->deleted)
51183+ return match;
51184+ else
51185+ return NULL;
51186+}
51187+
51188+static struct inodev_entry *
51189+lookup_inodev_entry(const ino_t ino, const dev_t dev)
51190+{
51191+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
51192+ struct inodev_entry *match;
51193+
51194+ match = inodev_set.i_hash[index];
51195+
51196+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
51197+ match = match->next;
51198+
51199+ return match;
51200+}
51201+
51202+static void
51203+insert_inodev_entry(struct inodev_entry *entry)
51204+{
51205+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
51206+ inodev_set.i_size);
51207+ struct inodev_entry **curr;
51208+
51209+ entry->prev = NULL;
51210+
51211+ curr = &inodev_set.i_hash[index];
51212+ if (*curr != NULL)
51213+ (*curr)->prev = entry;
51214+
51215+ entry->next = *curr;
51216+ *curr = entry;
51217+
51218+ return;
51219+}
51220+
51221+static void
51222+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
51223+{
51224+ unsigned int index =
51225+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
51226+ struct acl_role_label **curr;
51227+ struct acl_role_label *tmp, *tmp2;
51228+
51229+ curr = &acl_role_set.r_hash[index];
51230+
51231+ /* simple case, slot is empty, just set it to our role */
51232+ if (*curr == NULL) {
51233+ *curr = role;
51234+ } else {
51235+ /* example:
51236+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
51237+ 2 -> 3
51238+ */
51239+ /* first check to see if we can already be reached via this slot */
51240+ tmp = *curr;
51241+ while (tmp && tmp != role)
51242+ tmp = tmp->next;
51243+ if (tmp == role) {
51244+ /* we don't need to add ourselves to this slot's chain */
51245+ return;
51246+ }
51247+ /* we need to add ourselves to this chain, two cases */
51248+ if (role->next == NULL) {
51249+ /* simple case, append the current chain to our role */
51250+ role->next = *curr;
51251+ *curr = role;
51252+ } else {
51253+ /* 1 -> 2 -> 3 -> 4
51254+ 2 -> 3 -> 4
51255+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
51256+ */
51257+ /* trickier case: walk our role's chain until we find
51258+ the role for the start of the current slot's chain */
51259+ tmp = role;
51260+ tmp2 = *curr;
51261+ while (tmp->next && tmp->next != tmp2)
51262+ tmp = tmp->next;
51263+ if (tmp->next == tmp2) {
51264+ /* from example above, we found 3, so just
51265+ replace this slot's chain with ours */
51266+ *curr = role;
51267+ } else {
51268+ /* we didn't find a subset of our role's chain
51269+ in the current slot's chain, so append their
51270+ chain to ours, and set us as the first role in
51271+ the slot's chain
51272+
51273+ we could fold this case with the case above,
51274+ but making it explicit for clarity
51275+ */
51276+ tmp->next = tmp2;
51277+ *curr = role;
51278+ }
51279+ }
51280+ }
51281+
51282+ return;
51283+}
51284+
51285+static void
51286+insert_acl_role_label(struct acl_role_label *role)
51287+{
51288+ int i;
51289+
51290+ if (role_list == NULL) {
51291+ role_list = role;
51292+ role->prev = NULL;
51293+ } else {
51294+ role->prev = role_list;
51295+ role_list = role;
51296+ }
51297+
51298+ /* used for hash chains */
51299+ role->next = NULL;
51300+
51301+ if (role->roletype & GR_ROLE_DOMAIN) {
51302+ for (i = 0; i < role->domain_child_num; i++)
51303+ __insert_acl_role_label(role, role->domain_children[i]);
51304+ } else
51305+ __insert_acl_role_label(role, role->uidgid);
51306+}
51307+
51308+static int
51309+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
51310+{
51311+ struct name_entry **curr, *nentry;
51312+ struct inodev_entry *ientry;
51313+ unsigned int len = strlen(name);
51314+ unsigned int key = full_name_hash(name, len);
51315+ unsigned int index = key % name_set.n_size;
51316+
51317+ curr = &name_set.n_hash[index];
51318+
51319+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
51320+ curr = &((*curr)->next);
51321+
51322+ if (*curr != NULL)
51323+ return 1;
51324+
51325+ nentry = acl_alloc(sizeof (struct name_entry));
51326+ if (nentry == NULL)
51327+ return 0;
51328+ ientry = acl_alloc(sizeof (struct inodev_entry));
51329+ if (ientry == NULL)
51330+ return 0;
51331+ ientry->nentry = nentry;
51332+
51333+ nentry->key = key;
51334+ nentry->name = name;
51335+ nentry->inode = inode;
51336+ nentry->device = device;
51337+ nentry->len = len;
51338+ nentry->deleted = deleted;
51339+
51340+ nentry->prev = NULL;
51341+ curr = &name_set.n_hash[index];
51342+ if (*curr != NULL)
51343+ (*curr)->prev = nentry;
51344+ nentry->next = *curr;
51345+ *curr = nentry;
51346+
51347+ /* insert us into the table searchable by inode/dev */
51348+ insert_inodev_entry(ientry);
51349+
51350+ return 1;
51351+}
51352+
51353+static void
51354+insert_acl_obj_label(struct acl_object_label *obj,
51355+ struct acl_subject_label *subj)
51356+{
51357+ unsigned int index =
51358+ fhash(obj->inode, obj->device, subj->obj_hash_size);
51359+ struct acl_object_label **curr;
51360+
51361+
51362+ obj->prev = NULL;
51363+
51364+ curr = &subj->obj_hash[index];
51365+ if (*curr != NULL)
51366+ (*curr)->prev = obj;
51367+
51368+ obj->next = *curr;
51369+ *curr = obj;
51370+
51371+ return;
51372+}
51373+
51374+static void
51375+insert_acl_subj_label(struct acl_subject_label *obj,
51376+ struct acl_role_label *role)
51377+{
51378+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
51379+ struct acl_subject_label **curr;
51380+
51381+ obj->prev = NULL;
51382+
51383+ curr = &role->subj_hash[index];
51384+ if (*curr != NULL)
51385+ (*curr)->prev = obj;
51386+
51387+ obj->next = *curr;
51388+ *curr = obj;
51389+
51390+ return;
51391+}
51392+
51393+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
51394+
51395+static void *
51396+create_table(__u32 * len, int elementsize)
51397+{
51398+ unsigned int table_sizes[] = {
51399+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
51400+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
51401+ 4194301, 8388593, 16777213, 33554393, 67108859
51402+ };
51403+ void *newtable = NULL;
51404+ unsigned int pwr = 0;
51405+
51406+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
51407+ table_sizes[pwr] <= *len)
51408+ pwr++;
51409+
51410+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
51411+ return newtable;
51412+
51413+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
51414+ newtable =
51415+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
51416+ else
51417+ newtable = vmalloc(table_sizes[pwr] * elementsize);
51418+
51419+ *len = table_sizes[pwr];
51420+
51421+ return newtable;
51422+}
51423+
51424+static int
51425+init_variables(const struct gr_arg *arg)
51426+{
51427+ struct task_struct *reaper = &init_task;
51428+ unsigned int stacksize;
51429+
51430+ subj_map_set.s_size = arg->role_db.num_subjects;
51431+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51432+ name_set.n_size = arg->role_db.num_objects;
51433+ inodev_set.i_size = arg->role_db.num_objects;
51434+
51435+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
51436+ !name_set.n_size || !inodev_set.i_size)
51437+ return 1;
51438+
51439+ if (!gr_init_uidset())
51440+ return 1;
51441+
51442+ /* set up the stack that holds allocation info */
51443+
51444+ stacksize = arg->role_db.num_pointers + 5;
51445+
51446+ if (!acl_alloc_stack_init(stacksize))
51447+ return 1;
51448+
51449+ /* grab reference for the real root dentry and vfsmount */
51450+ get_fs_root(reaper->fs, &real_root);
51451+
51452+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51453+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
51454+#endif
51455+
51456+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51457+ if (fakefs_obj_rw == NULL)
51458+ return 1;
51459+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51460+
51461+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51462+ if (fakefs_obj_rwx == NULL)
51463+ return 1;
51464+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
51465+
51466+ subj_map_set.s_hash =
51467+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51468+ acl_role_set.r_hash =
51469+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51470+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51471+ inodev_set.i_hash =
51472+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51473+
51474+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51475+ !name_set.n_hash || !inodev_set.i_hash)
51476+ return 1;
51477+
51478+ memset(subj_map_set.s_hash, 0,
51479+ sizeof(struct subject_map *) * subj_map_set.s_size);
51480+ memset(acl_role_set.r_hash, 0,
51481+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
51482+ memset(name_set.n_hash, 0,
51483+ sizeof (struct name_entry *) * name_set.n_size);
51484+ memset(inodev_set.i_hash, 0,
51485+ sizeof (struct inodev_entry *) * inodev_set.i_size);
51486+
51487+ return 0;
51488+}
51489+
51490+/* free information not needed after startup
51491+ currently contains user->kernel pointer mappings for subjects
51492+*/
51493+
51494+static void
51495+free_init_variables(void)
51496+{
51497+ __u32 i;
51498+
51499+ if (subj_map_set.s_hash) {
51500+ for (i = 0; i < subj_map_set.s_size; i++) {
51501+ if (subj_map_set.s_hash[i]) {
51502+ kfree(subj_map_set.s_hash[i]);
51503+ subj_map_set.s_hash[i] = NULL;
51504+ }
51505+ }
51506+
51507+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51508+ PAGE_SIZE)
51509+ kfree(subj_map_set.s_hash);
51510+ else
51511+ vfree(subj_map_set.s_hash);
51512+ }
51513+
51514+ return;
51515+}
51516+
51517+static void
51518+free_variables(void)
51519+{
51520+ struct acl_subject_label *s;
51521+ struct acl_role_label *r;
51522+ struct task_struct *task, *task2;
51523+ unsigned int x;
51524+
51525+ gr_clear_learn_entries();
51526+
51527+ read_lock(&tasklist_lock);
51528+ do_each_thread(task2, task) {
51529+ task->acl_sp_role = 0;
51530+ task->acl_role_id = 0;
51531+ task->acl = NULL;
51532+ task->role = NULL;
51533+ } while_each_thread(task2, task);
51534+ read_unlock(&tasklist_lock);
51535+
51536+ /* release the reference to the real root dentry and vfsmount */
51537+ path_put(&real_root);
51538+ memset(&real_root, 0, sizeof(real_root));
51539+
51540+ /* free all object hash tables */
51541+
51542+ FOR_EACH_ROLE_START(r)
51543+ if (r->subj_hash == NULL)
51544+ goto next_role;
51545+ FOR_EACH_SUBJECT_START(r, s, x)
51546+ if (s->obj_hash == NULL)
51547+ break;
51548+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51549+ kfree(s->obj_hash);
51550+ else
51551+ vfree(s->obj_hash);
51552+ FOR_EACH_SUBJECT_END(s, x)
51553+ FOR_EACH_NESTED_SUBJECT_START(r, s)
51554+ if (s->obj_hash == NULL)
51555+ break;
51556+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51557+ kfree(s->obj_hash);
51558+ else
51559+ vfree(s->obj_hash);
51560+ FOR_EACH_NESTED_SUBJECT_END(s)
51561+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51562+ kfree(r->subj_hash);
51563+ else
51564+ vfree(r->subj_hash);
51565+ r->subj_hash = NULL;
51566+next_role:
51567+ FOR_EACH_ROLE_END(r)
51568+
51569+ acl_free_all();
51570+
51571+ if (acl_role_set.r_hash) {
51572+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51573+ PAGE_SIZE)
51574+ kfree(acl_role_set.r_hash);
51575+ else
51576+ vfree(acl_role_set.r_hash);
51577+ }
51578+ if (name_set.n_hash) {
51579+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
51580+ PAGE_SIZE)
51581+ kfree(name_set.n_hash);
51582+ else
51583+ vfree(name_set.n_hash);
51584+ }
51585+
51586+ if (inodev_set.i_hash) {
51587+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51588+ PAGE_SIZE)
51589+ kfree(inodev_set.i_hash);
51590+ else
51591+ vfree(inodev_set.i_hash);
51592+ }
51593+
51594+ gr_free_uidset();
51595+
51596+ memset(&name_set, 0, sizeof (struct name_db));
51597+ memset(&inodev_set, 0, sizeof (struct inodev_db));
51598+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51599+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51600+
51601+ default_role = NULL;
51602+ kernel_role = NULL;
51603+ role_list = NULL;
51604+
51605+ return;
51606+}
51607+
51608+static __u32
51609+count_user_objs(struct acl_object_label *userp)
51610+{
51611+ struct acl_object_label o_tmp;
51612+ __u32 num = 0;
51613+
51614+ while (userp) {
51615+ if (copy_from_user(&o_tmp, userp,
51616+ sizeof (struct acl_object_label)))
51617+ break;
51618+
51619+ userp = o_tmp.prev;
51620+ num++;
51621+ }
51622+
51623+ return num;
51624+}
51625+
51626+static struct acl_subject_label *
51627+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51628+
51629+static int
51630+copy_user_glob(struct acl_object_label *obj)
51631+{
51632+ struct acl_object_label *g_tmp, **guser;
51633+ unsigned int len;
51634+ char *tmp;
51635+
51636+ if (obj->globbed == NULL)
51637+ return 0;
51638+
51639+ guser = &obj->globbed;
51640+ while (*guser) {
51641+ g_tmp = (struct acl_object_label *)
51642+ acl_alloc(sizeof (struct acl_object_label));
51643+ if (g_tmp == NULL)
51644+ return -ENOMEM;
51645+
51646+ if (copy_from_user(g_tmp, *guser,
51647+ sizeof (struct acl_object_label)))
51648+ return -EFAULT;
51649+
51650+ len = strnlen_user(g_tmp->filename, PATH_MAX);
51651+
51652+ if (!len || len >= PATH_MAX)
51653+ return -EINVAL;
51654+
51655+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51656+ return -ENOMEM;
51657+
51658+ if (copy_from_user(tmp, g_tmp->filename, len))
51659+ return -EFAULT;
51660+ tmp[len-1] = '\0';
51661+ g_tmp->filename = tmp;
51662+
51663+ *guser = g_tmp;
51664+ guser = &(g_tmp->next);
51665+ }
51666+
51667+ return 0;
51668+}
51669+
51670+static int
51671+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51672+ struct acl_role_label *role)
51673+{
51674+ struct acl_object_label *o_tmp;
51675+ unsigned int len;
51676+ int ret;
51677+ char *tmp;
51678+
51679+ while (userp) {
51680+ if ((o_tmp = (struct acl_object_label *)
51681+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
51682+ return -ENOMEM;
51683+
51684+ if (copy_from_user(o_tmp, userp,
51685+ sizeof (struct acl_object_label)))
51686+ return -EFAULT;
51687+
51688+ userp = o_tmp->prev;
51689+
51690+ len = strnlen_user(o_tmp->filename, PATH_MAX);
51691+
51692+ if (!len || len >= PATH_MAX)
51693+ return -EINVAL;
51694+
51695+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51696+ return -ENOMEM;
51697+
51698+ if (copy_from_user(tmp, o_tmp->filename, len))
51699+ return -EFAULT;
51700+ tmp[len-1] = '\0';
51701+ o_tmp->filename = tmp;
51702+
51703+ insert_acl_obj_label(o_tmp, subj);
51704+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51705+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51706+ return -ENOMEM;
51707+
51708+ ret = copy_user_glob(o_tmp);
51709+ if (ret)
51710+ return ret;
51711+
51712+ if (o_tmp->nested) {
51713+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51714+ if (IS_ERR(o_tmp->nested))
51715+ return PTR_ERR(o_tmp->nested);
51716+
51717+ /* insert into nested subject list */
51718+ o_tmp->nested->next = role->hash->first;
51719+ role->hash->first = o_tmp->nested;
51720+ }
51721+ }
51722+
51723+ return 0;
51724+}
51725+
51726+static __u32
51727+count_user_subjs(struct acl_subject_label *userp)
51728+{
51729+ struct acl_subject_label s_tmp;
51730+ __u32 num = 0;
51731+
51732+ while (userp) {
51733+ if (copy_from_user(&s_tmp, userp,
51734+ sizeof (struct acl_subject_label)))
51735+ break;
51736+
51737+ userp = s_tmp.prev;
51738+ /* do not count nested subjects against this count, since
51739+ they are not included in the hash table, but are
51740+ attached to objects. We have already counted
51741+ the subjects in userspace for the allocation
51742+ stack
51743+ */
51744+ if (!(s_tmp.mode & GR_NESTED))
51745+ num++;
51746+ }
51747+
51748+ return num;
51749+}
51750+
51751+static int
51752+copy_user_allowedips(struct acl_role_label *rolep)
51753+{
51754+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51755+
51756+ ruserip = rolep->allowed_ips;
51757+
51758+ while (ruserip) {
51759+ rlast = rtmp;
51760+
51761+ if ((rtmp = (struct role_allowed_ip *)
51762+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51763+ return -ENOMEM;
51764+
51765+ if (copy_from_user(rtmp, ruserip,
51766+ sizeof (struct role_allowed_ip)))
51767+ return -EFAULT;
51768+
51769+ ruserip = rtmp->prev;
51770+
51771+ if (!rlast) {
51772+ rtmp->prev = NULL;
51773+ rolep->allowed_ips = rtmp;
51774+ } else {
51775+ rlast->next = rtmp;
51776+ rtmp->prev = rlast;
51777+ }
51778+
51779+ if (!ruserip)
51780+ rtmp->next = NULL;
51781+ }
51782+
51783+ return 0;
51784+}
51785+
51786+static int
51787+copy_user_transitions(struct acl_role_label *rolep)
51788+{
51789+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
51790+
51791+ unsigned int len;
51792+ char *tmp;
51793+
51794+ rusertp = rolep->transitions;
51795+
51796+ while (rusertp) {
51797+ rlast = rtmp;
51798+
51799+ if ((rtmp = (struct role_transition *)
51800+ acl_alloc(sizeof (struct role_transition))) == NULL)
51801+ return -ENOMEM;
51802+
51803+ if (copy_from_user(rtmp, rusertp,
51804+ sizeof (struct role_transition)))
51805+ return -EFAULT;
51806+
51807+ rusertp = rtmp->prev;
51808+
51809+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51810+
51811+ if (!len || len >= GR_SPROLE_LEN)
51812+ return -EINVAL;
51813+
51814+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51815+ return -ENOMEM;
51816+
51817+ if (copy_from_user(tmp, rtmp->rolename, len))
51818+ return -EFAULT;
51819+ tmp[len-1] = '\0';
51820+ rtmp->rolename = tmp;
51821+
51822+ if (!rlast) {
51823+ rtmp->prev = NULL;
51824+ rolep->transitions = rtmp;
51825+ } else {
51826+ rlast->next = rtmp;
51827+ rtmp->prev = rlast;
51828+ }
51829+
51830+ if (!rusertp)
51831+ rtmp->next = NULL;
51832+ }
51833+
51834+ return 0;
51835+}
51836+
51837+static struct acl_subject_label *
51838+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51839+{
51840+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51841+ unsigned int len;
51842+ char *tmp;
51843+ __u32 num_objs;
51844+ struct acl_ip_label **i_tmp, *i_utmp2;
51845+ struct gr_hash_struct ghash;
51846+ struct subject_map *subjmap;
51847+ unsigned int i_num;
51848+ int err;
51849+
51850+ s_tmp = lookup_subject_map(userp);
51851+
51852+ /* we've already copied this subject into the kernel, just return
51853+ the reference to it, and don't copy it over again
51854+ */
51855+ if (s_tmp)
51856+ return(s_tmp);
51857+
51858+ if ((s_tmp = (struct acl_subject_label *)
51859+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51860+ return ERR_PTR(-ENOMEM);
51861+
51862+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51863+ if (subjmap == NULL)
51864+ return ERR_PTR(-ENOMEM);
51865+
51866+ subjmap->user = userp;
51867+ subjmap->kernel = s_tmp;
51868+ insert_subj_map_entry(subjmap);
51869+
51870+ if (copy_from_user(s_tmp, userp,
51871+ sizeof (struct acl_subject_label)))
51872+ return ERR_PTR(-EFAULT);
51873+
51874+ len = strnlen_user(s_tmp->filename, PATH_MAX);
51875+
51876+ if (!len || len >= PATH_MAX)
51877+ return ERR_PTR(-EINVAL);
51878+
51879+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51880+ return ERR_PTR(-ENOMEM);
51881+
51882+ if (copy_from_user(tmp, s_tmp->filename, len))
51883+ return ERR_PTR(-EFAULT);
51884+ tmp[len-1] = '\0';
51885+ s_tmp->filename = tmp;
51886+
51887+ if (!strcmp(s_tmp->filename, "/"))
51888+ role->root_label = s_tmp;
51889+
51890+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51891+ return ERR_PTR(-EFAULT);
51892+
51893+ /* copy user and group transition tables */
51894+
51895+ if (s_tmp->user_trans_num) {
51896+ uid_t *uidlist;
51897+
51898+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51899+ if (uidlist == NULL)
51900+ return ERR_PTR(-ENOMEM);
51901+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51902+ return ERR_PTR(-EFAULT);
51903+
51904+ s_tmp->user_transitions = uidlist;
51905+ }
51906+
51907+ if (s_tmp->group_trans_num) {
51908+ gid_t *gidlist;
51909+
51910+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51911+ if (gidlist == NULL)
51912+ return ERR_PTR(-ENOMEM);
51913+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51914+ return ERR_PTR(-EFAULT);
51915+
51916+ s_tmp->group_transitions = gidlist;
51917+ }
51918+
51919+ /* set up object hash table */
51920+ num_objs = count_user_objs(ghash.first);
51921+
51922+ s_tmp->obj_hash_size = num_objs;
51923+ s_tmp->obj_hash =
51924+ (struct acl_object_label **)
51925+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51926+
51927+ if (!s_tmp->obj_hash)
51928+ return ERR_PTR(-ENOMEM);
51929+
51930+ memset(s_tmp->obj_hash, 0,
51931+ s_tmp->obj_hash_size *
51932+ sizeof (struct acl_object_label *));
51933+
51934+ /* add in objects */
51935+ err = copy_user_objs(ghash.first, s_tmp, role);
51936+
51937+ if (err)
51938+ return ERR_PTR(err);
51939+
51940+ /* set pointer for parent subject */
51941+ if (s_tmp->parent_subject) {
51942+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
51943+
51944+ if (IS_ERR(s_tmp2))
51945+ return s_tmp2;
51946+
51947+ s_tmp->parent_subject = s_tmp2;
51948+ }
51949+
51950+ /* add in ip acls */
51951+
51952+ if (!s_tmp->ip_num) {
51953+ s_tmp->ips = NULL;
51954+ goto insert;
51955+ }
51956+
51957+ i_tmp =
51958+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
51959+ sizeof (struct acl_ip_label *));
51960+
51961+ if (!i_tmp)
51962+ return ERR_PTR(-ENOMEM);
51963+
51964+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
51965+ *(i_tmp + i_num) =
51966+ (struct acl_ip_label *)
51967+ acl_alloc(sizeof (struct acl_ip_label));
51968+ if (!*(i_tmp + i_num))
51969+ return ERR_PTR(-ENOMEM);
51970+
51971+ if (copy_from_user
51972+ (&i_utmp2, s_tmp->ips + i_num,
51973+ sizeof (struct acl_ip_label *)))
51974+ return ERR_PTR(-EFAULT);
51975+
51976+ if (copy_from_user
51977+ (*(i_tmp + i_num), i_utmp2,
51978+ sizeof (struct acl_ip_label)))
51979+ return ERR_PTR(-EFAULT);
51980+
51981+ if ((*(i_tmp + i_num))->iface == NULL)
51982+ continue;
51983+
51984+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
51985+ if (!len || len >= IFNAMSIZ)
51986+ return ERR_PTR(-EINVAL);
51987+ tmp = acl_alloc(len);
51988+ if (tmp == NULL)
51989+ return ERR_PTR(-ENOMEM);
51990+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
51991+ return ERR_PTR(-EFAULT);
51992+ (*(i_tmp + i_num))->iface = tmp;
51993+ }
51994+
51995+ s_tmp->ips = i_tmp;
51996+
51997+insert:
51998+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
51999+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
52000+ return ERR_PTR(-ENOMEM);
52001+
52002+ return s_tmp;
52003+}
52004+
52005+static int
52006+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
52007+{
52008+ struct acl_subject_label s_pre;
52009+ struct acl_subject_label * ret;
52010+ int err;
52011+
52012+ while (userp) {
52013+ if (copy_from_user(&s_pre, userp,
52014+ sizeof (struct acl_subject_label)))
52015+ return -EFAULT;
52016+
52017+ /* do not add nested subjects here, add
52018+ while parsing objects
52019+ */
52020+
52021+ if (s_pre.mode & GR_NESTED) {
52022+ userp = s_pre.prev;
52023+ continue;
52024+ }
52025+
52026+ ret = do_copy_user_subj(userp, role);
52027+
52028+ err = PTR_ERR(ret);
52029+ if (IS_ERR(ret))
52030+ return err;
52031+
52032+ insert_acl_subj_label(ret, role);
52033+
52034+ userp = s_pre.prev;
52035+ }
52036+
52037+ return 0;
52038+}
52039+
52040+static int
52041+copy_user_acl(struct gr_arg *arg)
52042+{
52043+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
52044+ struct sprole_pw *sptmp;
52045+ struct gr_hash_struct *ghash;
52046+ uid_t *domainlist;
52047+ unsigned int r_num;
52048+ unsigned int len;
52049+ char *tmp;
52050+ int err = 0;
52051+ __u16 i;
52052+ __u32 num_subjs;
52053+
52054+ /* we need a default and kernel role */
52055+ if (arg->role_db.num_roles < 2)
52056+ return -EINVAL;
52057+
52058+ /* copy special role authentication info from userspace */
52059+
52060+ num_sprole_pws = arg->num_sprole_pws;
52061+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52062+
52063+ if (!acl_special_roles && num_sprole_pws)
52064+ return -ENOMEM;
52065+
52066+ for (i = 0; i < num_sprole_pws; i++) {
52067+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52068+ if (!sptmp)
52069+ return -ENOMEM;
52070+ if (copy_from_user(sptmp, arg->sprole_pws + i,
52071+ sizeof (struct sprole_pw)))
52072+ return -EFAULT;
52073+
52074+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
52075+
52076+ if (!len || len >= GR_SPROLE_LEN)
52077+ return -EINVAL;
52078+
52079+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52080+ return -ENOMEM;
52081+
52082+ if (copy_from_user(tmp, sptmp->rolename, len))
52083+ return -EFAULT;
52084+
52085+ tmp[len-1] = '\0';
52086+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52087+ printk(KERN_ALERT "Copying special role %s\n", tmp);
52088+#endif
52089+ sptmp->rolename = tmp;
52090+ acl_special_roles[i] = sptmp;
52091+ }
52092+
52093+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52094+
52095+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52096+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
52097+
52098+ if (!r_tmp)
52099+ return -ENOMEM;
52100+
52101+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
52102+ sizeof (struct acl_role_label *)))
52103+ return -EFAULT;
52104+
52105+ if (copy_from_user(r_tmp, r_utmp2,
52106+ sizeof (struct acl_role_label)))
52107+ return -EFAULT;
52108+
52109+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52110+
52111+ if (!len || len >= PATH_MAX)
52112+ return -EINVAL;
52113+
52114+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52115+ return -ENOMEM;
52116+
52117+ if (copy_from_user(tmp, r_tmp->rolename, len))
52118+ return -EFAULT;
52119+
52120+ tmp[len-1] = '\0';
52121+ r_tmp->rolename = tmp;
52122+
52123+ if (!strcmp(r_tmp->rolename, "default")
52124+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52125+ default_role = r_tmp;
52126+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52127+ kernel_role = r_tmp;
52128+ }
52129+
52130+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
52131+ return -ENOMEM;
52132+
52133+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
52134+ return -EFAULT;
52135+
52136+ r_tmp->hash = ghash;
52137+
52138+ num_subjs = count_user_subjs(r_tmp->hash->first);
52139+
52140+ r_tmp->subj_hash_size = num_subjs;
52141+ r_tmp->subj_hash =
52142+ (struct acl_subject_label **)
52143+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52144+
52145+ if (!r_tmp->subj_hash)
52146+ return -ENOMEM;
52147+
52148+ err = copy_user_allowedips(r_tmp);
52149+ if (err)
52150+ return err;
52151+
52152+ /* copy domain info */
52153+ if (r_tmp->domain_children != NULL) {
52154+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52155+ if (domainlist == NULL)
52156+ return -ENOMEM;
52157+
52158+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
52159+ return -EFAULT;
52160+
52161+ r_tmp->domain_children = domainlist;
52162+ }
52163+
52164+ err = copy_user_transitions(r_tmp);
52165+ if (err)
52166+ return err;
52167+
52168+ memset(r_tmp->subj_hash, 0,
52169+ r_tmp->subj_hash_size *
52170+ sizeof (struct acl_subject_label *));
52171+
52172+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
52173+
52174+ if (err)
52175+ return err;
52176+
52177+ /* set nested subject list to null */
52178+ r_tmp->hash->first = NULL;
52179+
52180+ insert_acl_role_label(r_tmp);
52181+ }
52182+
52183+ if (default_role == NULL || kernel_role == NULL)
52184+ return -EINVAL;
52185+
52186+ return err;
52187+}
52188+
52189+static int
52190+gracl_init(struct gr_arg *args)
52191+{
52192+ int error = 0;
52193+
52194+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
52195+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
52196+
52197+ if (init_variables(args)) {
52198+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
52199+ error = -ENOMEM;
52200+ free_variables();
52201+ goto out;
52202+ }
52203+
52204+ error = copy_user_acl(args);
52205+ free_init_variables();
52206+ if (error) {
52207+ free_variables();
52208+ goto out;
52209+ }
52210+
52211+ if ((error = gr_set_acls(0))) {
52212+ free_variables();
52213+ goto out;
52214+ }
52215+
52216+ pax_open_kernel();
52217+ gr_status |= GR_READY;
52218+ pax_close_kernel();
52219+
52220+ out:
52221+ return error;
52222+}
52223+
52224+/* derived from glibc fnmatch() 0: match, 1: no match*/
52225+
52226+static int
52227+glob_match(const char *p, const char *n)
52228+{
52229+ char c;
52230+
52231+ while ((c = *p++) != '\0') {
52232+ switch (c) {
52233+ case '?':
52234+ if (*n == '\0')
52235+ return 1;
52236+ else if (*n == '/')
52237+ return 1;
52238+ break;
52239+ case '\\':
52240+ if (*n != c)
52241+ return 1;
52242+ break;
52243+ case '*':
52244+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
52245+ if (*n == '/')
52246+ return 1;
52247+ else if (c == '?') {
52248+ if (*n == '\0')
52249+ return 1;
52250+ else
52251+ ++n;
52252+ }
52253+ }
52254+ if (c == '\0') {
52255+ return 0;
52256+ } else {
52257+ const char *endp;
52258+
52259+ if ((endp = strchr(n, '/')) == NULL)
52260+ endp = n + strlen(n);
52261+
52262+ if (c == '[') {
52263+ for (--p; n < endp; ++n)
52264+ if (!glob_match(p, n))
52265+ return 0;
52266+ } else if (c == '/') {
52267+ while (*n != '\0' && *n != '/')
52268+ ++n;
52269+ if (*n == '/' && !glob_match(p, n + 1))
52270+ return 0;
52271+ } else {
52272+ for (--p; n < endp; ++n)
52273+ if (*n == c && !glob_match(p, n))
52274+ return 0;
52275+ }
52276+
52277+ return 1;
52278+ }
52279+ case '[':
52280+ {
52281+ int not;
52282+ char cold;
52283+
52284+ if (*n == '\0' || *n == '/')
52285+ return 1;
52286+
52287+ not = (*p == '!' || *p == '^');
52288+ if (not)
52289+ ++p;
52290+
52291+ c = *p++;
52292+ for (;;) {
52293+ unsigned char fn = (unsigned char)*n;
52294+
52295+ if (c == '\0')
52296+ return 1;
52297+ else {
52298+ if (c == fn)
52299+ goto matched;
52300+ cold = c;
52301+ c = *p++;
52302+
52303+ if (c == '-' && *p != ']') {
52304+ unsigned char cend = *p++;
52305+
52306+ if (cend == '\0')
52307+ return 1;
52308+
52309+ if (cold <= fn && fn <= cend)
52310+ goto matched;
52311+
52312+ c = *p++;
52313+ }
52314+ }
52315+
52316+ if (c == ']')
52317+ break;
52318+ }
52319+ if (!not)
52320+ return 1;
52321+ break;
52322+ matched:
52323+ while (c != ']') {
52324+ if (c == '\0')
52325+ return 1;
52326+
52327+ c = *p++;
52328+ }
52329+ if (not)
52330+ return 1;
52331+ }
52332+ break;
52333+ default:
52334+ if (c != *n)
52335+ return 1;
52336+ }
52337+
52338+ ++n;
52339+ }
52340+
52341+ if (*n == '\0')
52342+ return 0;
52343+
52344+ if (*n == '/')
52345+ return 0;
52346+
52347+ return 1;
52348+}
52349+
52350+static struct acl_object_label *
52351+chk_glob_label(struct acl_object_label *globbed,
52352+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
52353+{
52354+ struct acl_object_label *tmp;
52355+
52356+ if (*path == NULL)
52357+ *path = gr_to_filename_nolock(dentry, mnt);
52358+
52359+ tmp = globbed;
52360+
52361+ while (tmp) {
52362+ if (!glob_match(tmp->filename, *path))
52363+ return tmp;
52364+ tmp = tmp->next;
52365+ }
52366+
52367+ return NULL;
52368+}
52369+
52370+static struct acl_object_label *
52371+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52372+ const ino_t curr_ino, const dev_t curr_dev,
52373+ const struct acl_subject_label *subj, char **path, const int checkglob)
52374+{
52375+ struct acl_subject_label *tmpsubj;
52376+ struct acl_object_label *retval;
52377+ struct acl_object_label *retval2;
52378+
52379+ tmpsubj = (struct acl_subject_label *) subj;
52380+ read_lock(&gr_inode_lock);
52381+ do {
52382+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
52383+ if (retval) {
52384+ if (checkglob && retval->globbed) {
52385+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
52386+ if (retval2)
52387+ retval = retval2;
52388+ }
52389+ break;
52390+ }
52391+ } while ((tmpsubj = tmpsubj->parent_subject));
52392+ read_unlock(&gr_inode_lock);
52393+
52394+ return retval;
52395+}
52396+
52397+static __inline__ struct acl_object_label *
52398+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52399+ struct dentry *curr_dentry,
52400+ const struct acl_subject_label *subj, char **path, const int checkglob)
52401+{
52402+ int newglob = checkglob;
52403+ ino_t inode;
52404+ dev_t device;
52405+
52406+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
52407+ as we don't want a / * rule to match instead of the / object
52408+ don't do this for create lookups that call this function though, since they're looking up
52409+ on the parent and thus need globbing checks on all paths
52410+ */
52411+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
52412+ newglob = GR_NO_GLOB;
52413+
52414+ spin_lock(&curr_dentry->d_lock);
52415+ inode = curr_dentry->d_inode->i_ino;
52416+ device = __get_dev(curr_dentry);
52417+ spin_unlock(&curr_dentry->d_lock);
52418+
52419+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
52420+}
52421+
52422+static struct acl_object_label *
52423+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52424+ const struct acl_subject_label *subj, char *path, const int checkglob)
52425+{
52426+ struct dentry *dentry = (struct dentry *) l_dentry;
52427+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52428+ struct mount *real_mnt = real_mount(mnt);
52429+ struct acl_object_label *retval;
52430+ struct dentry *parent;
52431+
52432+ write_seqlock(&rename_lock);
52433+ br_read_lock(vfsmount_lock);
52434+
52435+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52436+#ifdef CONFIG_NET
52437+ mnt == sock_mnt ||
52438+#endif
52439+#ifdef CONFIG_HUGETLBFS
52440+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
52441+#endif
52442+ /* ignore Eric Biederman */
52443+ IS_PRIVATE(l_dentry->d_inode))) {
52444+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52445+ goto out;
52446+ }
52447+
52448+ for (;;) {
52449+ if (dentry == real_root.dentry && mnt == real_root.mnt)
52450+ break;
52451+
52452+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52453+ if (!mnt_has_parent(real_mnt))
52454+ break;
52455+
52456+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52457+ if (retval != NULL)
52458+ goto out;
52459+
52460+ dentry = real_mnt->mnt_mountpoint;
52461+ real_mnt = real_mnt->mnt_parent;
52462+ mnt = &real_mnt->mnt;
52463+ continue;
52464+ }
52465+
52466+ parent = dentry->d_parent;
52467+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52468+ if (retval != NULL)
52469+ goto out;
52470+
52471+ dentry = parent;
52472+ }
52473+
52474+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52475+
52476+ /* real_root is pinned so we don't have to hold a reference */
52477+ if (retval == NULL)
52478+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
52479+out:
52480+ br_read_unlock(vfsmount_lock);
52481+ write_sequnlock(&rename_lock);
52482+
52483+ BUG_ON(retval == NULL);
52484+
52485+ return retval;
52486+}
52487+
52488+static __inline__ struct acl_object_label *
52489+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52490+ const struct acl_subject_label *subj)
52491+{
52492+ char *path = NULL;
52493+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52494+}
52495+
52496+static __inline__ struct acl_object_label *
52497+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52498+ const struct acl_subject_label *subj)
52499+{
52500+ char *path = NULL;
52501+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52502+}
52503+
52504+static __inline__ struct acl_object_label *
52505+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52506+ const struct acl_subject_label *subj, char *path)
52507+{
52508+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52509+}
52510+
52511+static struct acl_subject_label *
52512+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52513+ const struct acl_role_label *role)
52514+{
52515+ struct dentry *dentry = (struct dentry *) l_dentry;
52516+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52517+ struct mount *real_mnt = real_mount(mnt);
52518+ struct acl_subject_label *retval;
52519+ struct dentry *parent;
52520+
52521+ write_seqlock(&rename_lock);
52522+ br_read_lock(vfsmount_lock);
52523+
52524+ for (;;) {
52525+ if (dentry == real_root.dentry && mnt == real_root.mnt)
52526+ break;
52527+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52528+ if (!mnt_has_parent(real_mnt))
52529+ break;
52530+
52531+ spin_lock(&dentry->d_lock);
52532+ read_lock(&gr_inode_lock);
52533+ retval =
52534+ lookup_acl_subj_label(dentry->d_inode->i_ino,
52535+ __get_dev(dentry), role);
52536+ read_unlock(&gr_inode_lock);
52537+ spin_unlock(&dentry->d_lock);
52538+ if (retval != NULL)
52539+ goto out;
52540+
52541+ dentry = real_mnt->mnt_mountpoint;
52542+ real_mnt = real_mnt->mnt_parent;
52543+ mnt = &real_mnt->mnt;
52544+ continue;
52545+ }
52546+
52547+ spin_lock(&dentry->d_lock);
52548+ read_lock(&gr_inode_lock);
52549+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52550+ __get_dev(dentry), role);
52551+ read_unlock(&gr_inode_lock);
52552+ parent = dentry->d_parent;
52553+ spin_unlock(&dentry->d_lock);
52554+
52555+ if (retval != NULL)
52556+ goto out;
52557+
52558+ dentry = parent;
52559+ }
52560+
52561+ spin_lock(&dentry->d_lock);
52562+ read_lock(&gr_inode_lock);
52563+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52564+ __get_dev(dentry), role);
52565+ read_unlock(&gr_inode_lock);
52566+ spin_unlock(&dentry->d_lock);
52567+
52568+ if (unlikely(retval == NULL)) {
52569+ /* real_root is pinned, we don't need to hold a reference */
52570+ read_lock(&gr_inode_lock);
52571+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
52572+ __get_dev(real_root.dentry), role);
52573+ read_unlock(&gr_inode_lock);
52574+ }
52575+out:
52576+ br_read_unlock(vfsmount_lock);
52577+ write_sequnlock(&rename_lock);
52578+
52579+ BUG_ON(retval == NULL);
52580+
52581+ return retval;
52582+}
52583+
52584+static void
52585+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52586+{
52587+ struct task_struct *task = current;
52588+ const struct cred *cred = current_cred();
52589+
52590+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52591+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52592+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52593+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52594+
52595+ return;
52596+}
52597+
52598+static void
52599+gr_log_learn_sysctl(const char *path, const __u32 mode)
52600+{
52601+ struct task_struct *task = current;
52602+ const struct cred *cred = current_cred();
52603+
52604+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52605+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52606+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52607+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
52608+
52609+ return;
52610+}
52611+
52612+static void
52613+gr_log_learn_id_change(const char type, const unsigned int real,
52614+ const unsigned int effective, const unsigned int fs)
52615+{
52616+ struct task_struct *task = current;
52617+ const struct cred *cred = current_cred();
52618+
52619+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52620+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52621+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52622+ type, real, effective, fs, &task->signal->saved_ip);
52623+
52624+ return;
52625+}
52626+
52627+__u32
52628+gr_search_file(const struct dentry * dentry, const __u32 mode,
52629+ const struct vfsmount * mnt)
52630+{
52631+ __u32 retval = mode;
52632+ struct acl_subject_label *curracl;
52633+ struct acl_object_label *currobj;
52634+
52635+ if (unlikely(!(gr_status & GR_READY)))
52636+ return (mode & ~GR_AUDITS);
52637+
52638+ curracl = current->acl;
52639+
52640+ currobj = chk_obj_label(dentry, mnt, curracl);
52641+ retval = currobj->mode & mode;
52642+
52643+ /* if we're opening a specified transfer file for writing
52644+ (e.g. /dev/initctl), then transfer our role to init
52645+ */
52646+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52647+ current->role->roletype & GR_ROLE_PERSIST)) {
52648+ struct task_struct *task = init_pid_ns.child_reaper;
52649+
52650+ if (task->role != current->role) {
52651+ task->acl_sp_role = 0;
52652+ task->acl_role_id = current->acl_role_id;
52653+ task->role = current->role;
52654+ rcu_read_lock();
52655+ read_lock(&grsec_exec_file_lock);
52656+ gr_apply_subject_to_task(task);
52657+ read_unlock(&grsec_exec_file_lock);
52658+ rcu_read_unlock();
52659+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52660+ }
52661+ }
52662+
52663+ if (unlikely
52664+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52665+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52666+ __u32 new_mode = mode;
52667+
52668+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52669+
52670+ retval = new_mode;
52671+
52672+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52673+ new_mode |= GR_INHERIT;
52674+
52675+ if (!(mode & GR_NOLEARN))
52676+ gr_log_learn(dentry, mnt, new_mode);
52677+ }
52678+
52679+ return retval;
52680+}
52681+
52682+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52683+ const struct dentry *parent,
52684+ const struct vfsmount *mnt)
52685+{
52686+ struct name_entry *match;
52687+ struct acl_object_label *matchpo;
52688+ struct acl_subject_label *curracl;
52689+ char *path;
52690+
52691+ if (unlikely(!(gr_status & GR_READY)))
52692+ return NULL;
52693+
52694+ preempt_disable();
52695+ path = gr_to_filename_rbac(new_dentry, mnt);
52696+ match = lookup_name_entry_create(path);
52697+
52698+ curracl = current->acl;
52699+
52700+ if (match) {
52701+ read_lock(&gr_inode_lock);
52702+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52703+ read_unlock(&gr_inode_lock);
52704+
52705+ if (matchpo) {
52706+ preempt_enable();
52707+ return matchpo;
52708+ }
52709+ }
52710+
52711+ // lookup parent
52712+
52713+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52714+
52715+ preempt_enable();
52716+ return matchpo;
52717+}
52718+
52719+__u32
52720+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52721+ const struct vfsmount * mnt, const __u32 mode)
52722+{
52723+ struct acl_object_label *matchpo;
52724+ __u32 retval;
52725+
52726+ if (unlikely(!(gr_status & GR_READY)))
52727+ return (mode & ~GR_AUDITS);
52728+
52729+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
52730+
52731+ retval = matchpo->mode & mode;
52732+
52733+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52734+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52735+ __u32 new_mode = mode;
52736+
52737+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52738+
52739+ gr_log_learn(new_dentry, mnt, new_mode);
52740+ return new_mode;
52741+ }
52742+
52743+ return retval;
52744+}
52745+
52746+__u32
52747+gr_check_link(const struct dentry * new_dentry,
52748+ const struct dentry * parent_dentry,
52749+ const struct vfsmount * parent_mnt,
52750+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52751+{
52752+ struct acl_object_label *obj;
52753+ __u32 oldmode, newmode;
52754+ __u32 needmode;
52755+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52756+ GR_DELETE | GR_INHERIT;
52757+
52758+ if (unlikely(!(gr_status & GR_READY)))
52759+ return (GR_CREATE | GR_LINK);
52760+
52761+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52762+ oldmode = obj->mode;
52763+
52764+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52765+ newmode = obj->mode;
52766+
52767+ needmode = newmode & checkmodes;
52768+
52769+ // old name for hardlink must have at least the permissions of the new name
52770+ if ((oldmode & needmode) != needmode)
52771+ goto bad;
52772+
52773+ // if old name had restrictions/auditing, make sure the new name does as well
52774+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52775+
52776+ // don't allow hardlinking of suid/sgid files without permission
52777+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52778+ needmode |= GR_SETID;
52779+
52780+ if ((newmode & needmode) != needmode)
52781+ goto bad;
52782+
52783+ // enforce minimum permissions
52784+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52785+ return newmode;
52786+bad:
52787+ needmode = oldmode;
52788+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52789+ needmode |= GR_SETID;
52790+
52791+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52792+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52793+ return (GR_CREATE | GR_LINK);
52794+ } else if (newmode & GR_SUPPRESS)
52795+ return GR_SUPPRESS;
52796+ else
52797+ return 0;
52798+}
52799+
52800+int
52801+gr_check_hidden_task(const struct task_struct *task)
52802+{
52803+ if (unlikely(!(gr_status & GR_READY)))
52804+ return 0;
52805+
52806+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52807+ return 1;
52808+
52809+ return 0;
52810+}
52811+
52812+int
52813+gr_check_protected_task(const struct task_struct *task)
52814+{
52815+ if (unlikely(!(gr_status & GR_READY) || !task))
52816+ return 0;
52817+
52818+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52819+ task->acl != current->acl)
52820+ return 1;
52821+
52822+ return 0;
52823+}
52824+
52825+int
52826+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52827+{
52828+ struct task_struct *p;
52829+ int ret = 0;
52830+
52831+ if (unlikely(!(gr_status & GR_READY) || !pid))
52832+ return ret;
52833+
52834+ read_lock(&tasklist_lock);
52835+ do_each_pid_task(pid, type, p) {
52836+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52837+ p->acl != current->acl) {
52838+ ret = 1;
52839+ goto out;
52840+ }
52841+ } while_each_pid_task(pid, type, p);
52842+out:
52843+ read_unlock(&tasklist_lock);
52844+
52845+ return ret;
52846+}
52847+
52848+void
52849+gr_copy_label(struct task_struct *tsk)
52850+{
52851+ /* plain copying of fields is already done by dup_task_struct */
52852+ tsk->signal->used_accept = 0;
52853+ tsk->acl_sp_role = 0;
52854+ //tsk->acl_role_id = current->acl_role_id;
52855+ //tsk->acl = current->acl;
52856+ //tsk->role = current->role;
52857+ tsk->signal->curr_ip = current->signal->curr_ip;
52858+ tsk->signal->saved_ip = current->signal->saved_ip;
52859+ if (current->exec_file)
52860+ get_file(current->exec_file);
52861+ //tsk->exec_file = current->exec_file;
52862+ //tsk->is_writable = current->is_writable;
52863+ if (unlikely(current->signal->used_accept)) {
52864+ current->signal->curr_ip = 0;
52865+ current->signal->saved_ip = 0;
52866+ }
52867+
52868+ return;
52869+}
52870+
52871+static void
52872+gr_set_proc_res(struct task_struct *task)
52873+{
52874+ struct acl_subject_label *proc;
52875+ unsigned short i;
52876+
52877+ proc = task->acl;
52878+
52879+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52880+ return;
52881+
52882+ for (i = 0; i < RLIM_NLIMITS; i++) {
52883+ if (!(proc->resmask & (1 << i)))
52884+ continue;
52885+
52886+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52887+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52888+ }
52889+
52890+ return;
52891+}
52892+
52893+extern int __gr_process_user_ban(struct user_struct *user);
52894+
52895+int
52896+gr_check_user_change(int real, int effective, int fs)
52897+{
52898+ unsigned int i;
52899+ __u16 num;
52900+ uid_t *uidlist;
52901+ int curuid;
52902+ int realok = 0;
52903+ int effectiveok = 0;
52904+ int fsok = 0;
52905+
52906+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52907+ struct user_struct *user;
52908+
52909+ if (real == -1)
52910+ goto skipit;
52911+
52912+ user = find_user(real);
52913+ if (user == NULL)
52914+ goto skipit;
52915+
52916+ if (__gr_process_user_ban(user)) {
52917+ /* for find_user */
52918+ free_uid(user);
52919+ return 1;
52920+ }
52921+
52922+ /* for find_user */
52923+ free_uid(user);
52924+
52925+skipit:
52926+#endif
52927+
52928+ if (unlikely(!(gr_status & GR_READY)))
52929+ return 0;
52930+
52931+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52932+ gr_log_learn_id_change('u', real, effective, fs);
52933+
52934+ num = current->acl->user_trans_num;
52935+ uidlist = current->acl->user_transitions;
52936+
52937+ if (uidlist == NULL)
52938+ return 0;
52939+
52940+ if (real == -1)
52941+ realok = 1;
52942+ if (effective == -1)
52943+ effectiveok = 1;
52944+ if (fs == -1)
52945+ fsok = 1;
52946+
52947+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
52948+ for (i = 0; i < num; i++) {
52949+ curuid = (int)uidlist[i];
52950+ if (real == curuid)
52951+ realok = 1;
52952+ if (effective == curuid)
52953+ effectiveok = 1;
52954+ if (fs == curuid)
52955+ fsok = 1;
52956+ }
52957+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
52958+ for (i = 0; i < num; i++) {
52959+ curuid = (int)uidlist[i];
52960+ if (real == curuid)
52961+ break;
52962+ if (effective == curuid)
52963+ break;
52964+ if (fs == curuid)
52965+ break;
52966+ }
52967+ /* not in deny list */
52968+ if (i == num) {
52969+ realok = 1;
52970+ effectiveok = 1;
52971+ fsok = 1;
52972+ }
52973+ }
52974+
52975+ if (realok && effectiveok && fsok)
52976+ return 0;
52977+ else {
52978+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52979+ return 1;
52980+ }
52981+}
52982+
52983+int
52984+gr_check_group_change(int real, int effective, int fs)
52985+{
52986+ unsigned int i;
52987+ __u16 num;
52988+ gid_t *gidlist;
52989+ int curgid;
52990+ int realok = 0;
52991+ int effectiveok = 0;
52992+ int fsok = 0;
52993+
52994+ if (unlikely(!(gr_status & GR_READY)))
52995+ return 0;
52996+
52997+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52998+ gr_log_learn_id_change('g', real, effective, fs);
52999+
53000+ num = current->acl->group_trans_num;
53001+ gidlist = current->acl->group_transitions;
53002+
53003+ if (gidlist == NULL)
53004+ return 0;
53005+
53006+ if (real == -1)
53007+ realok = 1;
53008+ if (effective == -1)
53009+ effectiveok = 1;
53010+ if (fs == -1)
53011+ fsok = 1;
53012+
53013+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
53014+ for (i = 0; i < num; i++) {
53015+ curgid = (int)gidlist[i];
53016+ if (real == curgid)
53017+ realok = 1;
53018+ if (effective == curgid)
53019+ effectiveok = 1;
53020+ if (fs == curgid)
53021+ fsok = 1;
53022+ }
53023+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
53024+ for (i = 0; i < num; i++) {
53025+ curgid = (int)gidlist[i];
53026+ if (real == curgid)
53027+ break;
53028+ if (effective == curgid)
53029+ break;
53030+ if (fs == curgid)
53031+ break;
53032+ }
53033+ /* not in deny list */
53034+ if (i == num) {
53035+ realok = 1;
53036+ effectiveok = 1;
53037+ fsok = 1;
53038+ }
53039+ }
53040+
53041+ if (realok && effectiveok && fsok)
53042+ return 0;
53043+ else {
53044+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53045+ return 1;
53046+ }
53047+}
53048+
53049+extern int gr_acl_is_capable(const int cap);
53050+
53051+void
53052+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53053+{
53054+ struct acl_role_label *role = task->role;
53055+ struct acl_subject_label *subj = NULL;
53056+ struct acl_object_label *obj;
53057+ struct file *filp;
53058+
53059+ if (unlikely(!(gr_status & GR_READY)))
53060+ return;
53061+
53062+ filp = task->exec_file;
53063+
53064+ /* kernel process, we'll give them the kernel role */
53065+ if (unlikely(!filp)) {
53066+ task->role = kernel_role;
53067+ task->acl = kernel_role->root_label;
53068+ return;
53069+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53070+ role = lookup_acl_role_label(task, uid, gid);
53071+
53072+ /* don't change the role if we're not a privileged process */
53073+ if (role && task->role != role &&
53074+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
53075+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
53076+ return;
53077+
53078+ /* perform subject lookup in possibly new role
53079+ we can use this result below in the case where role == task->role
53080+ */
53081+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53082+
53083+ /* if we changed uid/gid, but result in the same role
53084+ and are using inheritance, don't lose the inherited subject
53085+ if current subject is other than what normal lookup
53086+ would result in, we arrived via inheritance, don't
53087+ lose subject
53088+ */
53089+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53090+ (subj == task->acl)))
53091+ task->acl = subj;
53092+
53093+ task->role = role;
53094+
53095+ task->is_writable = 0;
53096+
53097+ /* ignore additional mmap checks for processes that are writable
53098+ by the default ACL */
53099+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53100+ if (unlikely(obj->mode & GR_WRITE))
53101+ task->is_writable = 1;
53102+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53103+ if (unlikely(obj->mode & GR_WRITE))
53104+ task->is_writable = 1;
53105+
53106+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53107+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53108+#endif
53109+
53110+ gr_set_proc_res(task);
53111+
53112+ return;
53113+}
53114+
53115+int
53116+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53117+ const int unsafe_flags)
53118+{
53119+ struct task_struct *task = current;
53120+ struct acl_subject_label *newacl;
53121+ struct acl_object_label *obj;
53122+ __u32 retmode;
53123+
53124+ if (unlikely(!(gr_status & GR_READY)))
53125+ return 0;
53126+
53127+ newacl = chk_subj_label(dentry, mnt, task->role);
53128+
53129+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
53130+ did an exec
53131+ */
53132+ rcu_read_lock();
53133+ read_lock(&tasklist_lock);
53134+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
53135+ (task->parent->acl->mode & GR_POVERRIDE))) {
53136+ read_unlock(&tasklist_lock);
53137+ rcu_read_unlock();
53138+ goto skip_check;
53139+ }
53140+ read_unlock(&tasklist_lock);
53141+ rcu_read_unlock();
53142+
53143+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53144+ !(task->role->roletype & GR_ROLE_GOD) &&
53145+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
53146+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53147+ if (unsafe_flags & LSM_UNSAFE_SHARE)
53148+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53149+ else
53150+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53151+ return -EACCES;
53152+ }
53153+
53154+skip_check:
53155+
53156+ obj = chk_obj_label(dentry, mnt, task->acl);
53157+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53158+
53159+ if (!(task->acl->mode & GR_INHERITLEARN) &&
53160+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53161+ if (obj->nested)
53162+ task->acl = obj->nested;
53163+ else
53164+ task->acl = newacl;
53165+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
53166+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
53167+
53168+ task->is_writable = 0;
53169+
53170+ /* ignore additional mmap checks for processes that are writable
53171+ by the default ACL */
53172+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
53173+ if (unlikely(obj->mode & GR_WRITE))
53174+ task->is_writable = 1;
53175+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
53176+ if (unlikely(obj->mode & GR_WRITE))
53177+ task->is_writable = 1;
53178+
53179+ gr_set_proc_res(task);
53180+
53181+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53182+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53183+#endif
53184+ return 0;
53185+}
53186+
53187+/* always called with valid inodev ptr */
53188+static void
53189+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
53190+{
53191+ struct acl_object_label *matchpo;
53192+ struct acl_subject_label *matchps;
53193+ struct acl_subject_label *subj;
53194+ struct acl_role_label *role;
53195+ unsigned int x;
53196+
53197+ FOR_EACH_ROLE_START(role)
53198+ FOR_EACH_SUBJECT_START(role, subj, x)
53199+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
53200+ matchpo->mode |= GR_DELETED;
53201+ FOR_EACH_SUBJECT_END(subj,x)
53202+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
53203+ if (subj->inode == ino && subj->device == dev)
53204+ subj->mode |= GR_DELETED;
53205+ FOR_EACH_NESTED_SUBJECT_END(subj)
53206+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
53207+ matchps->mode |= GR_DELETED;
53208+ FOR_EACH_ROLE_END(role)
53209+
53210+ inodev->nentry->deleted = 1;
53211+
53212+ return;
53213+}
53214+
53215+void
53216+gr_handle_delete(const ino_t ino, const dev_t dev)
53217+{
53218+ struct inodev_entry *inodev;
53219+
53220+ if (unlikely(!(gr_status & GR_READY)))
53221+ return;
53222+
53223+ write_lock(&gr_inode_lock);
53224+ inodev = lookup_inodev_entry(ino, dev);
53225+ if (inodev != NULL)
53226+ do_handle_delete(inodev, ino, dev);
53227+ write_unlock(&gr_inode_lock);
53228+
53229+ return;
53230+}
53231+
53232+static void
53233+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
53234+ const ino_t newinode, const dev_t newdevice,
53235+ struct acl_subject_label *subj)
53236+{
53237+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
53238+ struct acl_object_label *match;
53239+
53240+ match = subj->obj_hash[index];
53241+
53242+ while (match && (match->inode != oldinode ||
53243+ match->device != olddevice ||
53244+ !(match->mode & GR_DELETED)))
53245+ match = match->next;
53246+
53247+ if (match && (match->inode == oldinode)
53248+ && (match->device == olddevice)
53249+ && (match->mode & GR_DELETED)) {
53250+ if (match->prev == NULL) {
53251+ subj->obj_hash[index] = match->next;
53252+ if (match->next != NULL)
53253+ match->next->prev = NULL;
53254+ } else {
53255+ match->prev->next = match->next;
53256+ if (match->next != NULL)
53257+ match->next->prev = match->prev;
53258+ }
53259+ match->prev = NULL;
53260+ match->next = NULL;
53261+ match->inode = newinode;
53262+ match->device = newdevice;
53263+ match->mode &= ~GR_DELETED;
53264+
53265+ insert_acl_obj_label(match, subj);
53266+ }
53267+
53268+ return;
53269+}
53270+
53271+static void
53272+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
53273+ const ino_t newinode, const dev_t newdevice,
53274+ struct acl_role_label *role)
53275+{
53276+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
53277+ struct acl_subject_label *match;
53278+
53279+ match = role->subj_hash[index];
53280+
53281+ while (match && (match->inode != oldinode ||
53282+ match->device != olddevice ||
53283+ !(match->mode & GR_DELETED)))
53284+ match = match->next;
53285+
53286+ if (match && (match->inode == oldinode)
53287+ && (match->device == olddevice)
53288+ && (match->mode & GR_DELETED)) {
53289+ if (match->prev == NULL) {
53290+ role->subj_hash[index] = match->next;
53291+ if (match->next != NULL)
53292+ match->next->prev = NULL;
53293+ } else {
53294+ match->prev->next = match->next;
53295+ if (match->next != NULL)
53296+ match->next->prev = match->prev;
53297+ }
53298+ match->prev = NULL;
53299+ match->next = NULL;
53300+ match->inode = newinode;
53301+ match->device = newdevice;
53302+ match->mode &= ~GR_DELETED;
53303+
53304+ insert_acl_subj_label(match, role);
53305+ }
53306+
53307+ return;
53308+}
53309+
53310+static void
53311+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
53312+ const ino_t newinode, const dev_t newdevice)
53313+{
53314+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
53315+ struct inodev_entry *match;
53316+
53317+ match = inodev_set.i_hash[index];
53318+
53319+ while (match && (match->nentry->inode != oldinode ||
53320+ match->nentry->device != olddevice || !match->nentry->deleted))
53321+ match = match->next;
53322+
53323+ if (match && (match->nentry->inode == oldinode)
53324+ && (match->nentry->device == olddevice) &&
53325+ match->nentry->deleted) {
53326+ if (match->prev == NULL) {
53327+ inodev_set.i_hash[index] = match->next;
53328+ if (match->next != NULL)
53329+ match->next->prev = NULL;
53330+ } else {
53331+ match->prev->next = match->next;
53332+ if (match->next != NULL)
53333+ match->next->prev = match->prev;
53334+ }
53335+ match->prev = NULL;
53336+ match->next = NULL;
53337+ match->nentry->inode = newinode;
53338+ match->nentry->device = newdevice;
53339+ match->nentry->deleted = 0;
53340+
53341+ insert_inodev_entry(match);
53342+ }
53343+
53344+ return;
53345+}
53346+
53347+static void
53348+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
53349+{
53350+ struct acl_subject_label *subj;
53351+ struct acl_role_label *role;
53352+ unsigned int x;
53353+
53354+ FOR_EACH_ROLE_START(role)
53355+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
53356+
53357+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
53358+ if ((subj->inode == ino) && (subj->device == dev)) {
53359+ subj->inode = ino;
53360+ subj->device = dev;
53361+ }
53362+ FOR_EACH_NESTED_SUBJECT_END(subj)
53363+ FOR_EACH_SUBJECT_START(role, subj, x)
53364+ update_acl_obj_label(matchn->inode, matchn->device,
53365+ ino, dev, subj);
53366+ FOR_EACH_SUBJECT_END(subj,x)
53367+ FOR_EACH_ROLE_END(role)
53368+
53369+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
53370+
53371+ return;
53372+}
53373+
53374+static void
53375+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
53376+ const struct vfsmount *mnt)
53377+{
53378+ ino_t ino = dentry->d_inode->i_ino;
53379+ dev_t dev = __get_dev(dentry);
53380+
53381+ __do_handle_create(matchn, ino, dev);
53382+
53383+ return;
53384+}
53385+
53386+void
53387+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53388+{
53389+ struct name_entry *matchn;
53390+
53391+ if (unlikely(!(gr_status & GR_READY)))
53392+ return;
53393+
53394+ preempt_disable();
53395+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
53396+
53397+ if (unlikely((unsigned long)matchn)) {
53398+ write_lock(&gr_inode_lock);
53399+ do_handle_create(matchn, dentry, mnt);
53400+ write_unlock(&gr_inode_lock);
53401+ }
53402+ preempt_enable();
53403+
53404+ return;
53405+}
53406+
53407+void
53408+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53409+{
53410+ struct name_entry *matchn;
53411+
53412+ if (unlikely(!(gr_status & GR_READY)))
53413+ return;
53414+
53415+ preempt_disable();
53416+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
53417+
53418+ if (unlikely((unsigned long)matchn)) {
53419+ write_lock(&gr_inode_lock);
53420+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
53421+ write_unlock(&gr_inode_lock);
53422+ }
53423+ preempt_enable();
53424+
53425+ return;
53426+}
53427+
53428+void
53429+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53430+ struct dentry *old_dentry,
53431+ struct dentry *new_dentry,
53432+ struct vfsmount *mnt, const __u8 replace)
53433+{
53434+ struct name_entry *matchn;
53435+ struct inodev_entry *inodev;
53436+ struct inode *inode = new_dentry->d_inode;
53437+ ino_t old_ino = old_dentry->d_inode->i_ino;
53438+ dev_t old_dev = __get_dev(old_dentry);
53439+
53440+ /* vfs_rename swaps the name and parent link for old_dentry and
53441+ new_dentry
53442+ at this point, old_dentry has the new name, parent link, and inode
53443+ for the renamed file
53444+ if a file is being replaced by a rename, new_dentry has the inode
53445+ and name for the replaced file
53446+ */
53447+
53448+ if (unlikely(!(gr_status & GR_READY)))
53449+ return;
53450+
53451+ preempt_disable();
53452+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53453+
53454+ /* we wouldn't have to check d_inode if it weren't for
53455+ NFS silly-renaming
53456+ */
53457+
53458+ write_lock(&gr_inode_lock);
53459+ if (unlikely(replace && inode)) {
53460+ ino_t new_ino = inode->i_ino;
53461+ dev_t new_dev = __get_dev(new_dentry);
53462+
53463+ inodev = lookup_inodev_entry(new_ino, new_dev);
53464+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
53465+ do_handle_delete(inodev, new_ino, new_dev);
53466+ }
53467+
53468+ inodev = lookup_inodev_entry(old_ino, old_dev);
53469+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
53470+ do_handle_delete(inodev, old_ino, old_dev);
53471+
53472+ if (unlikely((unsigned long)matchn))
53473+ do_handle_create(matchn, old_dentry, mnt);
53474+
53475+ write_unlock(&gr_inode_lock);
53476+ preempt_enable();
53477+
53478+ return;
53479+}
53480+
53481+static int
53482+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53483+ unsigned char **sum)
53484+{
53485+ struct acl_role_label *r;
53486+ struct role_allowed_ip *ipp;
53487+ struct role_transition *trans;
53488+ unsigned int i;
53489+ int found = 0;
53490+ u32 curr_ip = current->signal->curr_ip;
53491+
53492+ current->signal->saved_ip = curr_ip;
53493+
53494+ /* check transition table */
53495+
53496+ for (trans = current->role->transitions; trans; trans = trans->next) {
53497+ if (!strcmp(rolename, trans->rolename)) {
53498+ found = 1;
53499+ break;
53500+ }
53501+ }
53502+
53503+ if (!found)
53504+ return 0;
53505+
53506+ /* handle special roles that do not require authentication
53507+ and check ip */
53508+
53509+ FOR_EACH_ROLE_START(r)
53510+ if (!strcmp(rolename, r->rolename) &&
53511+ (r->roletype & GR_ROLE_SPECIAL)) {
53512+ found = 0;
53513+ if (r->allowed_ips != NULL) {
53514+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53515+ if ((ntohl(curr_ip) & ipp->netmask) ==
53516+ (ntohl(ipp->addr) & ipp->netmask))
53517+ found = 1;
53518+ }
53519+ } else
53520+ found = 2;
53521+ if (!found)
53522+ return 0;
53523+
53524+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53525+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53526+ *salt = NULL;
53527+ *sum = NULL;
53528+ return 1;
53529+ }
53530+ }
53531+ FOR_EACH_ROLE_END(r)
53532+
53533+ for (i = 0; i < num_sprole_pws; i++) {
53534+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53535+ *salt = acl_special_roles[i]->salt;
53536+ *sum = acl_special_roles[i]->sum;
53537+ return 1;
53538+ }
53539+ }
53540+
53541+ return 0;
53542+}
53543+
53544+static void
53545+assign_special_role(char *rolename)
53546+{
53547+ struct acl_object_label *obj;
53548+ struct acl_role_label *r;
53549+ struct acl_role_label *assigned = NULL;
53550+ struct task_struct *tsk;
53551+ struct file *filp;
53552+
53553+ FOR_EACH_ROLE_START(r)
53554+ if (!strcmp(rolename, r->rolename) &&
53555+ (r->roletype & GR_ROLE_SPECIAL)) {
53556+ assigned = r;
53557+ break;
53558+ }
53559+ FOR_EACH_ROLE_END(r)
53560+
53561+ if (!assigned)
53562+ return;
53563+
53564+ read_lock(&tasklist_lock);
53565+ read_lock(&grsec_exec_file_lock);
53566+
53567+ tsk = current->real_parent;
53568+ if (tsk == NULL)
53569+ goto out_unlock;
53570+
53571+ filp = tsk->exec_file;
53572+ if (filp == NULL)
53573+ goto out_unlock;
53574+
53575+ tsk->is_writable = 0;
53576+
53577+ tsk->acl_sp_role = 1;
53578+ tsk->acl_role_id = ++acl_sp_role_value;
53579+ tsk->role = assigned;
53580+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53581+
53582+ /* ignore additional mmap checks for processes that are writable
53583+ by the default ACL */
53584+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53585+ if (unlikely(obj->mode & GR_WRITE))
53586+ tsk->is_writable = 1;
53587+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53588+ if (unlikely(obj->mode & GR_WRITE))
53589+ tsk->is_writable = 1;
53590+
53591+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53592+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53593+#endif
53594+
53595+out_unlock:
53596+ read_unlock(&grsec_exec_file_lock);
53597+ read_unlock(&tasklist_lock);
53598+ return;
53599+}
53600+
53601+int gr_check_secure_terminal(struct task_struct *task)
53602+{
53603+ struct task_struct *p, *p2, *p3;
53604+ struct files_struct *files;
53605+ struct fdtable *fdt;
53606+ struct file *our_file = NULL, *file;
53607+ int i;
53608+
53609+ if (task->signal->tty == NULL)
53610+ return 1;
53611+
53612+ files = get_files_struct(task);
53613+ if (files != NULL) {
53614+ rcu_read_lock();
53615+ fdt = files_fdtable(files);
53616+ for (i=0; i < fdt->max_fds; i++) {
53617+ file = fcheck_files(files, i);
53618+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53619+ get_file(file);
53620+ our_file = file;
53621+ }
53622+ }
53623+ rcu_read_unlock();
53624+ put_files_struct(files);
53625+ }
53626+
53627+ if (our_file == NULL)
53628+ return 1;
53629+
53630+ read_lock(&tasklist_lock);
53631+ do_each_thread(p2, p) {
53632+ files = get_files_struct(p);
53633+ if (files == NULL ||
53634+ (p->signal && p->signal->tty == task->signal->tty)) {
53635+ if (files != NULL)
53636+ put_files_struct(files);
53637+ continue;
53638+ }
53639+ rcu_read_lock();
53640+ fdt = files_fdtable(files);
53641+ for (i=0; i < fdt->max_fds; i++) {
53642+ file = fcheck_files(files, i);
53643+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53644+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53645+ p3 = task;
53646+ while (p3->pid > 0) {
53647+ if (p3 == p)
53648+ break;
53649+ p3 = p3->real_parent;
53650+ }
53651+ if (p3 == p)
53652+ break;
53653+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53654+ gr_handle_alertkill(p);
53655+ rcu_read_unlock();
53656+ put_files_struct(files);
53657+ read_unlock(&tasklist_lock);
53658+ fput(our_file);
53659+ return 0;
53660+ }
53661+ }
53662+ rcu_read_unlock();
53663+ put_files_struct(files);
53664+ } while_each_thread(p2, p);
53665+ read_unlock(&tasklist_lock);
53666+
53667+ fput(our_file);
53668+ return 1;
53669+}
53670+
53671+ssize_t
53672+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53673+{
53674+ struct gr_arg_wrapper uwrap;
53675+ unsigned char *sprole_salt = NULL;
53676+ unsigned char *sprole_sum = NULL;
53677+ int error = sizeof (struct gr_arg_wrapper);
53678+ int error2 = 0;
53679+
53680+ mutex_lock(&gr_dev_mutex);
53681+
53682+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53683+ error = -EPERM;
53684+ goto out;
53685+ }
53686+
53687+ if (count != sizeof (struct gr_arg_wrapper)) {
53688+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53689+ error = -EINVAL;
53690+ goto out;
53691+ }
53692+
53693+
53694+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53695+ gr_auth_expires = 0;
53696+ gr_auth_attempts = 0;
53697+ }
53698+
53699+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53700+ error = -EFAULT;
53701+ goto out;
53702+ }
53703+
53704+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53705+ error = -EINVAL;
53706+ goto out;
53707+ }
53708+
53709+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53710+ error = -EFAULT;
53711+ goto out;
53712+ }
53713+
53714+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53715+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53716+ time_after(gr_auth_expires, get_seconds())) {
53717+ error = -EBUSY;
53718+ goto out;
53719+ }
53720+
53721+ /* if non-root trying to do anything other than use a special role,
53722+ do not attempt authentication, do not count towards authentication
53723+ locking
53724+ */
53725+
53726+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53727+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53728+ current_uid()) {
53729+ error = -EPERM;
53730+ goto out;
53731+ }
53732+
53733+ /* ensure pw and special role name are null terminated */
53734+
53735+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53736+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53737+
53738+ /* Okay.
53739+ * We have our enough of the argument structure..(we have yet
53740+ * to copy_from_user the tables themselves) . Copy the tables
53741+ * only if we need them, i.e. for loading operations. */
53742+
53743+ switch (gr_usermode->mode) {
53744+ case GR_STATUS:
53745+ if (gr_status & GR_READY) {
53746+ error = 1;
53747+ if (!gr_check_secure_terminal(current))
53748+ error = 3;
53749+ } else
53750+ error = 2;
53751+ goto out;
53752+ case GR_SHUTDOWN:
53753+ if ((gr_status & GR_READY)
53754+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53755+ pax_open_kernel();
53756+ gr_status &= ~GR_READY;
53757+ pax_close_kernel();
53758+
53759+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53760+ free_variables();
53761+ memset(gr_usermode, 0, sizeof (struct gr_arg));
53762+ memset(gr_system_salt, 0, GR_SALT_LEN);
53763+ memset(gr_system_sum, 0, GR_SHA_LEN);
53764+ } else if (gr_status & GR_READY) {
53765+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53766+ error = -EPERM;
53767+ } else {
53768+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53769+ error = -EAGAIN;
53770+ }
53771+ break;
53772+ case GR_ENABLE:
53773+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53774+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53775+ else {
53776+ if (gr_status & GR_READY)
53777+ error = -EAGAIN;
53778+ else
53779+ error = error2;
53780+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53781+ }
53782+ break;
53783+ case GR_RELOAD:
53784+ if (!(gr_status & GR_READY)) {
53785+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53786+ error = -EAGAIN;
53787+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53788+ preempt_disable();
53789+
53790+ pax_open_kernel();
53791+ gr_status &= ~GR_READY;
53792+ pax_close_kernel();
53793+
53794+ free_variables();
53795+ if (!(error2 = gracl_init(gr_usermode))) {
53796+ preempt_enable();
53797+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53798+ } else {
53799+ preempt_enable();
53800+ error = error2;
53801+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53802+ }
53803+ } else {
53804+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53805+ error = -EPERM;
53806+ }
53807+ break;
53808+ case GR_SEGVMOD:
53809+ if (unlikely(!(gr_status & GR_READY))) {
53810+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53811+ error = -EAGAIN;
53812+ break;
53813+ }
53814+
53815+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53816+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53817+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53818+ struct acl_subject_label *segvacl;
53819+ segvacl =
53820+ lookup_acl_subj_label(gr_usermode->segv_inode,
53821+ gr_usermode->segv_device,
53822+ current->role);
53823+ if (segvacl) {
53824+ segvacl->crashes = 0;
53825+ segvacl->expires = 0;
53826+ }
53827+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53828+ gr_remove_uid(gr_usermode->segv_uid);
53829+ }
53830+ } else {
53831+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53832+ error = -EPERM;
53833+ }
53834+ break;
53835+ case GR_SPROLE:
53836+ case GR_SPROLEPAM:
53837+ if (unlikely(!(gr_status & GR_READY))) {
53838+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53839+ error = -EAGAIN;
53840+ break;
53841+ }
53842+
53843+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53844+ current->role->expires = 0;
53845+ current->role->auth_attempts = 0;
53846+ }
53847+
53848+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53849+ time_after(current->role->expires, get_seconds())) {
53850+ error = -EBUSY;
53851+ goto out;
53852+ }
53853+
53854+ if (lookup_special_role_auth
53855+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53856+ && ((!sprole_salt && !sprole_sum)
53857+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53858+ char *p = "";
53859+ assign_special_role(gr_usermode->sp_role);
53860+ read_lock(&tasklist_lock);
53861+ if (current->real_parent)
53862+ p = current->real_parent->role->rolename;
53863+ read_unlock(&tasklist_lock);
53864+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53865+ p, acl_sp_role_value);
53866+ } else {
53867+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53868+ error = -EPERM;
53869+ if(!(current->role->auth_attempts++))
53870+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53871+
53872+ goto out;
53873+ }
53874+ break;
53875+ case GR_UNSPROLE:
53876+ if (unlikely(!(gr_status & GR_READY))) {
53877+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53878+ error = -EAGAIN;
53879+ break;
53880+ }
53881+
53882+ if (current->role->roletype & GR_ROLE_SPECIAL) {
53883+ char *p = "";
53884+ int i = 0;
53885+
53886+ read_lock(&tasklist_lock);
53887+ if (current->real_parent) {
53888+ p = current->real_parent->role->rolename;
53889+ i = current->real_parent->acl_role_id;
53890+ }
53891+ read_unlock(&tasklist_lock);
53892+
53893+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53894+ gr_set_acls(1);
53895+ } else {
53896+ error = -EPERM;
53897+ goto out;
53898+ }
53899+ break;
53900+ default:
53901+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53902+ error = -EINVAL;
53903+ break;
53904+ }
53905+
53906+ if (error != -EPERM)
53907+ goto out;
53908+
53909+ if(!(gr_auth_attempts++))
53910+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53911+
53912+ out:
53913+ mutex_unlock(&gr_dev_mutex);
53914+ return error;
53915+}
53916+
53917+/* must be called with
53918+ rcu_read_lock();
53919+ read_lock(&tasklist_lock);
53920+ read_lock(&grsec_exec_file_lock);
53921+*/
53922+int gr_apply_subject_to_task(struct task_struct *task)
53923+{
53924+ struct acl_object_label *obj;
53925+ char *tmpname;
53926+ struct acl_subject_label *tmpsubj;
53927+ struct file *filp;
53928+ struct name_entry *nmatch;
53929+
53930+ filp = task->exec_file;
53931+ if (filp == NULL)
53932+ return 0;
53933+
53934+ /* the following is to apply the correct subject
53935+ on binaries running when the RBAC system
53936+ is enabled, when the binaries have been
53937+ replaced or deleted since their execution
53938+ -----
53939+ when the RBAC system starts, the inode/dev
53940+ from exec_file will be one the RBAC system
53941+ is unaware of. It only knows the inode/dev
53942+ of the present file on disk, or the absence
53943+ of it.
53944+ */
53945+ preempt_disable();
53946+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
53947+
53948+ nmatch = lookup_name_entry(tmpname);
53949+ preempt_enable();
53950+ tmpsubj = NULL;
53951+ if (nmatch) {
53952+ if (nmatch->deleted)
53953+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
53954+ else
53955+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
53956+ if (tmpsubj != NULL)
53957+ task->acl = tmpsubj;
53958+ }
53959+ if (tmpsubj == NULL)
53960+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
53961+ task->role);
53962+ if (task->acl) {
53963+ task->is_writable = 0;
53964+ /* ignore additional mmap checks for processes that are writable
53965+ by the default ACL */
53966+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53967+ if (unlikely(obj->mode & GR_WRITE))
53968+ task->is_writable = 1;
53969+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53970+ if (unlikely(obj->mode & GR_WRITE))
53971+ task->is_writable = 1;
53972+
53973+ gr_set_proc_res(task);
53974+
53975+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53976+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53977+#endif
53978+ } else {
53979+ return 1;
53980+ }
53981+
53982+ return 0;
53983+}
53984+
53985+int
53986+gr_set_acls(const int type)
53987+{
53988+ struct task_struct *task, *task2;
53989+ struct acl_role_label *role = current->role;
53990+ __u16 acl_role_id = current->acl_role_id;
53991+ const struct cred *cred;
53992+ int ret;
53993+
53994+ rcu_read_lock();
53995+ read_lock(&tasklist_lock);
53996+ read_lock(&grsec_exec_file_lock);
53997+ do_each_thread(task2, task) {
53998+ /* check to see if we're called from the exit handler,
53999+ if so, only replace ACLs that have inherited the admin
54000+ ACL */
54001+
54002+ if (type && (task->role != role ||
54003+ task->acl_role_id != acl_role_id))
54004+ continue;
54005+
54006+ task->acl_role_id = 0;
54007+ task->acl_sp_role = 0;
54008+
54009+ if (task->exec_file) {
54010+ cred = __task_cred(task);
54011+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
54012+ ret = gr_apply_subject_to_task(task);
54013+ if (ret) {
54014+ read_unlock(&grsec_exec_file_lock);
54015+ read_unlock(&tasklist_lock);
54016+ rcu_read_unlock();
54017+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
54018+ return ret;
54019+ }
54020+ } else {
54021+ // it's a kernel process
54022+ task->role = kernel_role;
54023+ task->acl = kernel_role->root_label;
54024+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
54025+ task->acl->mode &= ~GR_PROCFIND;
54026+#endif
54027+ }
54028+ } while_each_thread(task2, task);
54029+ read_unlock(&grsec_exec_file_lock);
54030+ read_unlock(&tasklist_lock);
54031+ rcu_read_unlock();
54032+
54033+ return 0;
54034+}
54035+
54036+void
54037+gr_learn_resource(const struct task_struct *task,
54038+ const int res, const unsigned long wanted, const int gt)
54039+{
54040+ struct acl_subject_label *acl;
54041+ const struct cred *cred;
54042+
54043+ if (unlikely((gr_status & GR_READY) &&
54044+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54045+ goto skip_reslog;
54046+
54047+#ifdef CONFIG_GRKERNSEC_RESLOG
54048+ gr_log_resource(task, res, wanted, gt);
54049+#endif
54050+ skip_reslog:
54051+
54052+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54053+ return;
54054+
54055+ acl = task->acl;
54056+
54057+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54058+ !(acl->resmask & (1 << (unsigned short) res))))
54059+ return;
54060+
54061+ if (wanted >= acl->res[res].rlim_cur) {
54062+ unsigned long res_add;
54063+
54064+ res_add = wanted;
54065+ switch (res) {
54066+ case RLIMIT_CPU:
54067+ res_add += GR_RLIM_CPU_BUMP;
54068+ break;
54069+ case RLIMIT_FSIZE:
54070+ res_add += GR_RLIM_FSIZE_BUMP;
54071+ break;
54072+ case RLIMIT_DATA:
54073+ res_add += GR_RLIM_DATA_BUMP;
54074+ break;
54075+ case RLIMIT_STACK:
54076+ res_add += GR_RLIM_STACK_BUMP;
54077+ break;
54078+ case RLIMIT_CORE:
54079+ res_add += GR_RLIM_CORE_BUMP;
54080+ break;
54081+ case RLIMIT_RSS:
54082+ res_add += GR_RLIM_RSS_BUMP;
54083+ break;
54084+ case RLIMIT_NPROC:
54085+ res_add += GR_RLIM_NPROC_BUMP;
54086+ break;
54087+ case RLIMIT_NOFILE:
54088+ res_add += GR_RLIM_NOFILE_BUMP;
54089+ break;
54090+ case RLIMIT_MEMLOCK:
54091+ res_add += GR_RLIM_MEMLOCK_BUMP;
54092+ break;
54093+ case RLIMIT_AS:
54094+ res_add += GR_RLIM_AS_BUMP;
54095+ break;
54096+ case RLIMIT_LOCKS:
54097+ res_add += GR_RLIM_LOCKS_BUMP;
54098+ break;
54099+ case RLIMIT_SIGPENDING:
54100+ res_add += GR_RLIM_SIGPENDING_BUMP;
54101+ break;
54102+ case RLIMIT_MSGQUEUE:
54103+ res_add += GR_RLIM_MSGQUEUE_BUMP;
54104+ break;
54105+ case RLIMIT_NICE:
54106+ res_add += GR_RLIM_NICE_BUMP;
54107+ break;
54108+ case RLIMIT_RTPRIO:
54109+ res_add += GR_RLIM_RTPRIO_BUMP;
54110+ break;
54111+ case RLIMIT_RTTIME:
54112+ res_add += GR_RLIM_RTTIME_BUMP;
54113+ break;
54114+ }
54115+
54116+ acl->res[res].rlim_cur = res_add;
54117+
54118+ if (wanted > acl->res[res].rlim_max)
54119+ acl->res[res].rlim_max = res_add;
54120+
54121+ /* only log the subject filename, since resource logging is supported for
54122+ single-subject learning only */
54123+ rcu_read_lock();
54124+ cred = __task_cred(task);
54125+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54126+ task->role->roletype, cred->uid, cred->gid, acl->filename,
54127+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
54128+ "", (unsigned long) res, &task->signal->saved_ip);
54129+ rcu_read_unlock();
54130+ }
54131+
54132+ return;
54133+}
54134+
54135+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54136+void
54137+pax_set_initial_flags(struct linux_binprm *bprm)
54138+{
54139+ struct task_struct *task = current;
54140+ struct acl_subject_label *proc;
54141+ unsigned long flags;
54142+
54143+ if (unlikely(!(gr_status & GR_READY)))
54144+ return;
54145+
54146+ flags = pax_get_flags(task);
54147+
54148+ proc = task->acl;
54149+
54150+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54151+ flags &= ~MF_PAX_PAGEEXEC;
54152+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54153+ flags &= ~MF_PAX_SEGMEXEC;
54154+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54155+ flags &= ~MF_PAX_RANDMMAP;
54156+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54157+ flags &= ~MF_PAX_EMUTRAMP;
54158+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54159+ flags &= ~MF_PAX_MPROTECT;
54160+
54161+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54162+ flags |= MF_PAX_PAGEEXEC;
54163+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
54164+ flags |= MF_PAX_SEGMEXEC;
54165+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
54166+ flags |= MF_PAX_RANDMMAP;
54167+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54168+ flags |= MF_PAX_EMUTRAMP;
54169+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54170+ flags |= MF_PAX_MPROTECT;
54171+
54172+ pax_set_flags(task, flags);
54173+
54174+ return;
54175+}
54176+#endif
54177+
54178+#ifdef CONFIG_SYSCTL
54179+/* Eric Biederman likes breaking userland ABI and every inode-based security
54180+ system to save 35kb of memory */
54181+
54182+/* we modify the passed in filename, but adjust it back before returning */
54183+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
54184+{
54185+ struct name_entry *nmatch;
54186+ char *p, *lastp = NULL;
54187+ struct acl_object_label *obj = NULL, *tmp;
54188+ struct acl_subject_label *tmpsubj;
54189+ char c = '\0';
54190+
54191+ read_lock(&gr_inode_lock);
54192+
54193+ p = name + len - 1;
54194+ do {
54195+ nmatch = lookup_name_entry(name);
54196+ if (lastp != NULL)
54197+ *lastp = c;
54198+
54199+ if (nmatch == NULL)
54200+ goto next_component;
54201+ tmpsubj = current->acl;
54202+ do {
54203+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
54204+ if (obj != NULL) {
54205+ tmp = obj->globbed;
54206+ while (tmp) {
54207+ if (!glob_match(tmp->filename, name)) {
54208+ obj = tmp;
54209+ goto found_obj;
54210+ }
54211+ tmp = tmp->next;
54212+ }
54213+ goto found_obj;
54214+ }
54215+ } while ((tmpsubj = tmpsubj->parent_subject));
54216+next_component:
54217+ /* end case */
54218+ if (p == name)
54219+ break;
54220+
54221+ while (*p != '/')
54222+ p--;
54223+ if (p == name)
54224+ lastp = p + 1;
54225+ else {
54226+ lastp = p;
54227+ p--;
54228+ }
54229+ c = *lastp;
54230+ *lastp = '\0';
54231+ } while (1);
54232+found_obj:
54233+ read_unlock(&gr_inode_lock);
54234+ /* obj returned will always be non-null */
54235+ return obj;
54236+}
54237+
54238+/* returns 0 when allowing, non-zero on error
54239+ op of 0 is used for readdir, so we don't log the names of hidden files
54240+*/
54241+__u32
54242+gr_handle_sysctl(const struct ctl_table *table, const int op)
54243+{
54244+ struct ctl_table *tmp;
54245+ const char *proc_sys = "/proc/sys";
54246+ char *path;
54247+ struct acl_object_label *obj;
54248+ unsigned short len = 0, pos = 0, depth = 0, i;
54249+ __u32 err = 0;
54250+ __u32 mode = 0;
54251+
54252+ if (unlikely(!(gr_status & GR_READY)))
54253+ return 0;
54254+
54255+ /* for now, ignore operations on non-sysctl entries if it's not a
54256+ readdir*/
54257+ if (table->child != NULL && op != 0)
54258+ return 0;
54259+
54260+ mode |= GR_FIND;
54261+ /* it's only a read if it's an entry, read on dirs is for readdir */
54262+ if (op & MAY_READ)
54263+ mode |= GR_READ;
54264+ if (op & MAY_WRITE)
54265+ mode |= GR_WRITE;
54266+
54267+ preempt_disable();
54268+
54269+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
54270+
54271+ /* it's only a read/write if it's an actual entry, not a dir
54272+ (which are opened for readdir)
54273+ */
54274+
54275+ /* convert the requested sysctl entry into a pathname */
54276+
54277+ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54278+ len += strlen(tmp->procname);
54279+ len++;
54280+ depth++;
54281+ }
54282+
54283+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
54284+ /* deny */
54285+ goto out;
54286+ }
54287+
54288+ memset(path, 0, PAGE_SIZE);
54289+
54290+ memcpy(path, proc_sys, strlen(proc_sys));
54291+
54292+ pos += strlen(proc_sys);
54293+
54294+ for (; depth > 0; depth--) {
54295+ path[pos] = '/';
54296+ pos++;
54297+ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54298+ if (depth == i) {
54299+ memcpy(path + pos, tmp->procname,
54300+ strlen(tmp->procname));
54301+ pos += strlen(tmp->procname);
54302+ }
54303+ i++;
54304+ }
54305+ }
54306+
54307+ obj = gr_lookup_by_name(path, pos);
54308+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
54309+
54310+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
54311+ ((err & mode) != mode))) {
54312+ __u32 new_mode = mode;
54313+
54314+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
54315+
54316+ err = 0;
54317+ gr_log_learn_sysctl(path, new_mode);
54318+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
54319+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
54320+ err = -ENOENT;
54321+ } else if (!(err & GR_FIND)) {
54322+ err = -ENOENT;
54323+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
54324+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
54325+ path, (mode & GR_READ) ? " reading" : "",
54326+ (mode & GR_WRITE) ? " writing" : "");
54327+ err = -EACCES;
54328+ } else if ((err & mode) != mode) {
54329+ err = -EACCES;
54330+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
54331+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
54332+ path, (mode & GR_READ) ? " reading" : "",
54333+ (mode & GR_WRITE) ? " writing" : "");
54334+ err = 0;
54335+ } else
54336+ err = 0;
54337+
54338+ out:
54339+ preempt_enable();
54340+
54341+ return err;
54342+}
54343+#endif
54344+
54345+int
54346+gr_handle_proc_ptrace(struct task_struct *task)
54347+{
54348+ struct file *filp;
54349+ struct task_struct *tmp = task;
54350+ struct task_struct *curtemp = current;
54351+ __u32 retmode;
54352+
54353+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54354+ if (unlikely(!(gr_status & GR_READY)))
54355+ return 0;
54356+#endif
54357+
54358+ read_lock(&tasklist_lock);
54359+ read_lock(&grsec_exec_file_lock);
54360+ filp = task->exec_file;
54361+
54362+ while (tmp->pid > 0) {
54363+ if (tmp == curtemp)
54364+ break;
54365+ tmp = tmp->real_parent;
54366+ }
54367+
54368+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54369+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
54370+ read_unlock(&grsec_exec_file_lock);
54371+ read_unlock(&tasklist_lock);
54372+ return 1;
54373+ }
54374+
54375+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54376+ if (!(gr_status & GR_READY)) {
54377+ read_unlock(&grsec_exec_file_lock);
54378+ read_unlock(&tasklist_lock);
54379+ return 0;
54380+ }
54381+#endif
54382+
54383+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
54384+ read_unlock(&grsec_exec_file_lock);
54385+ read_unlock(&tasklist_lock);
54386+
54387+ if (retmode & GR_NOPTRACE)
54388+ return 1;
54389+
54390+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
54391+ && (current->acl != task->acl || (current->acl != current->role->root_label
54392+ && current->pid != task->pid)))
54393+ return 1;
54394+
54395+ return 0;
54396+}
54397+
54398+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
54399+{
54400+ if (unlikely(!(gr_status & GR_READY)))
54401+ return;
54402+
54403+ if (!(current->role->roletype & GR_ROLE_GOD))
54404+ return;
54405+
54406+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
54407+ p->role->rolename, gr_task_roletype_to_char(p),
54408+ p->acl->filename);
54409+}
54410+
54411+int
54412+gr_handle_ptrace(struct task_struct *task, const long request)
54413+{
54414+ struct task_struct *tmp = task;
54415+ struct task_struct *curtemp = current;
54416+ __u32 retmode;
54417+
54418+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54419+ if (unlikely(!(gr_status & GR_READY)))
54420+ return 0;
54421+#endif
54422+
54423+ read_lock(&tasklist_lock);
54424+ while (tmp->pid > 0) {
54425+ if (tmp == curtemp)
54426+ break;
54427+ tmp = tmp->real_parent;
54428+ }
54429+
54430+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54431+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
54432+ read_unlock(&tasklist_lock);
54433+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54434+ return 1;
54435+ }
54436+ read_unlock(&tasklist_lock);
54437+
54438+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54439+ if (!(gr_status & GR_READY))
54440+ return 0;
54441+#endif
54442+
54443+ read_lock(&grsec_exec_file_lock);
54444+ if (unlikely(!task->exec_file)) {
54445+ read_unlock(&grsec_exec_file_lock);
54446+ return 0;
54447+ }
54448+
54449+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
54450+ read_unlock(&grsec_exec_file_lock);
54451+
54452+ if (retmode & GR_NOPTRACE) {
54453+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54454+ return 1;
54455+ }
54456+
54457+ if (retmode & GR_PTRACERD) {
54458+ switch (request) {
54459+ case PTRACE_SEIZE:
54460+ case PTRACE_POKETEXT:
54461+ case PTRACE_POKEDATA:
54462+ case PTRACE_POKEUSR:
54463+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
54464+ case PTRACE_SETREGS:
54465+ case PTRACE_SETFPREGS:
54466+#endif
54467+#ifdef CONFIG_X86
54468+ case PTRACE_SETFPXREGS:
54469+#endif
54470+#ifdef CONFIG_ALTIVEC
54471+ case PTRACE_SETVRREGS:
54472+#endif
54473+ return 1;
54474+ default:
54475+ return 0;
54476+ }
54477+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
54478+ !(current->role->roletype & GR_ROLE_GOD) &&
54479+ (current->acl != task->acl)) {
54480+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54481+ return 1;
54482+ }
54483+
54484+ return 0;
54485+}
54486+
54487+static int is_writable_mmap(const struct file *filp)
54488+{
54489+ struct task_struct *task = current;
54490+ struct acl_object_label *obj, *obj2;
54491+
54492+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
54493+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
54494+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54495+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
54496+ task->role->root_label);
54497+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
54498+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
54499+ return 1;
54500+ }
54501+ }
54502+ return 0;
54503+}
54504+
54505+int
54506+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
54507+{
54508+ __u32 mode;
54509+
54510+ if (unlikely(!file || !(prot & PROT_EXEC)))
54511+ return 1;
54512+
54513+ if (is_writable_mmap(file))
54514+ return 0;
54515+
54516+ mode =
54517+ gr_search_file(file->f_path.dentry,
54518+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54519+ file->f_path.mnt);
54520+
54521+ if (!gr_tpe_allow(file))
54522+ return 0;
54523+
54524+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54525+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54526+ return 0;
54527+ } else if (unlikely(!(mode & GR_EXEC))) {
54528+ return 0;
54529+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54530+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54531+ return 1;
54532+ }
54533+
54534+ return 1;
54535+}
54536+
54537+int
54538+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54539+{
54540+ __u32 mode;
54541+
54542+ if (unlikely(!file || !(prot & PROT_EXEC)))
54543+ return 1;
54544+
54545+ if (is_writable_mmap(file))
54546+ return 0;
54547+
54548+ mode =
54549+ gr_search_file(file->f_path.dentry,
54550+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54551+ file->f_path.mnt);
54552+
54553+ if (!gr_tpe_allow(file))
54554+ return 0;
54555+
54556+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54557+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54558+ return 0;
54559+ } else if (unlikely(!(mode & GR_EXEC))) {
54560+ return 0;
54561+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54562+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54563+ return 1;
54564+ }
54565+
54566+ return 1;
54567+}
54568+
54569+void
54570+gr_acl_handle_psacct(struct task_struct *task, const long code)
54571+{
54572+ unsigned long runtime;
54573+ unsigned long cputime;
54574+ unsigned int wday, cday;
54575+ __u8 whr, chr;
54576+ __u8 wmin, cmin;
54577+ __u8 wsec, csec;
54578+ struct timespec timeval;
54579+
54580+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
54581+ !(task->acl->mode & GR_PROCACCT)))
54582+ return;
54583+
54584+ do_posix_clock_monotonic_gettime(&timeval);
54585+ runtime = timeval.tv_sec - task->start_time.tv_sec;
54586+ wday = runtime / (3600 * 24);
54587+ runtime -= wday * (3600 * 24);
54588+ whr = runtime / 3600;
54589+ runtime -= whr * 3600;
54590+ wmin = runtime / 60;
54591+ runtime -= wmin * 60;
54592+ wsec = runtime;
54593+
54594+ cputime = (task->utime + task->stime) / HZ;
54595+ cday = cputime / (3600 * 24);
54596+ cputime -= cday * (3600 * 24);
54597+ chr = cputime / 3600;
54598+ cputime -= chr * 3600;
54599+ cmin = cputime / 60;
54600+ cputime -= cmin * 60;
54601+ csec = cputime;
54602+
54603+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
54604+
54605+ return;
54606+}
54607+
54608+void gr_set_kernel_label(struct task_struct *task)
54609+{
54610+ if (gr_status & GR_READY) {
54611+ task->role = kernel_role;
54612+ task->acl = kernel_role->root_label;
54613+ }
54614+ return;
54615+}
54616+
54617+#ifdef CONFIG_TASKSTATS
54618+int gr_is_taskstats_denied(int pid)
54619+{
54620+ struct task_struct *task;
54621+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54622+ const struct cred *cred;
54623+#endif
54624+ int ret = 0;
54625+
54626+ /* restrict taskstats viewing to un-chrooted root users
54627+ who have the 'view' subject flag if the RBAC system is enabled
54628+ */
54629+
54630+ rcu_read_lock();
54631+ read_lock(&tasklist_lock);
54632+ task = find_task_by_vpid(pid);
54633+ if (task) {
54634+#ifdef CONFIG_GRKERNSEC_CHROOT
54635+ if (proc_is_chrooted(task))
54636+ ret = -EACCES;
54637+#endif
54638+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54639+ cred = __task_cred(task);
54640+#ifdef CONFIG_GRKERNSEC_PROC_USER
54641+ if (cred->uid != 0)
54642+ ret = -EACCES;
54643+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54644+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
54645+ ret = -EACCES;
54646+#endif
54647+#endif
54648+ if (gr_status & GR_READY) {
54649+ if (!(task->acl->mode & GR_VIEW))
54650+ ret = -EACCES;
54651+ }
54652+ } else
54653+ ret = -ENOENT;
54654+
54655+ read_unlock(&tasklist_lock);
54656+ rcu_read_unlock();
54657+
54658+ return ret;
54659+}
54660+#endif
54661+
54662+/* AUXV entries are filled via a descendant of search_binary_handler
54663+ after we've already applied the subject for the target
54664+*/
54665+int gr_acl_enable_at_secure(void)
54666+{
54667+ if (unlikely(!(gr_status & GR_READY)))
54668+ return 0;
54669+
54670+ if (current->acl->mode & GR_ATSECURE)
54671+ return 1;
54672+
54673+ return 0;
54674+}
54675+
54676+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54677+{
54678+ struct task_struct *task = current;
54679+ struct dentry *dentry = file->f_path.dentry;
54680+ struct vfsmount *mnt = file->f_path.mnt;
54681+ struct acl_object_label *obj, *tmp;
54682+ struct acl_subject_label *subj;
54683+ unsigned int bufsize;
54684+ int is_not_root;
54685+ char *path;
54686+ dev_t dev = __get_dev(dentry);
54687+
54688+ if (unlikely(!(gr_status & GR_READY)))
54689+ return 1;
54690+
54691+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54692+ return 1;
54693+
54694+ /* ignore Eric Biederman */
54695+ if (IS_PRIVATE(dentry->d_inode))
54696+ return 1;
54697+
54698+ subj = task->acl;
54699+ do {
54700+ obj = lookup_acl_obj_label(ino, dev, subj);
54701+ if (obj != NULL)
54702+ return (obj->mode & GR_FIND) ? 1 : 0;
54703+ } while ((subj = subj->parent_subject));
54704+
54705+ /* this is purely an optimization since we're looking for an object
54706+ for the directory we're doing a readdir on
54707+ if it's possible for any globbed object to match the entry we're
54708+ filling into the directory, then the object we find here will be
54709+ an anchor point with attached globbed objects
54710+ */
54711+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54712+ if (obj->globbed == NULL)
54713+ return (obj->mode & GR_FIND) ? 1 : 0;
54714+
54715+ is_not_root = ((obj->filename[0] == '/') &&
54716+ (obj->filename[1] == '\0')) ? 0 : 1;
54717+ bufsize = PAGE_SIZE - namelen - is_not_root;
54718+
54719+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
54720+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54721+ return 1;
54722+
54723+ preempt_disable();
54724+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54725+ bufsize);
54726+
54727+ bufsize = strlen(path);
54728+
54729+ /* if base is "/", don't append an additional slash */
54730+ if (is_not_root)
54731+ *(path + bufsize) = '/';
54732+ memcpy(path + bufsize + is_not_root, name, namelen);
54733+ *(path + bufsize + namelen + is_not_root) = '\0';
54734+
54735+ tmp = obj->globbed;
54736+ while (tmp) {
54737+ if (!glob_match(tmp->filename, path)) {
54738+ preempt_enable();
54739+ return (tmp->mode & GR_FIND) ? 1 : 0;
54740+ }
54741+ tmp = tmp->next;
54742+ }
54743+ preempt_enable();
54744+ return (obj->mode & GR_FIND) ? 1 : 0;
54745+}
54746+
54747+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54748+EXPORT_SYMBOL(gr_acl_is_enabled);
54749+#endif
54750+EXPORT_SYMBOL(gr_learn_resource);
54751+EXPORT_SYMBOL(gr_set_kernel_label);
54752+#ifdef CONFIG_SECURITY
54753+EXPORT_SYMBOL(gr_check_user_change);
54754+EXPORT_SYMBOL(gr_check_group_change);
54755+#endif
54756+
54757diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54758new file mode 100644
54759index 0000000..34fefda
54760--- /dev/null
54761+++ b/grsecurity/gracl_alloc.c
54762@@ -0,0 +1,105 @@
54763+#include <linux/kernel.h>
54764+#include <linux/mm.h>
54765+#include <linux/slab.h>
54766+#include <linux/vmalloc.h>
54767+#include <linux/gracl.h>
54768+#include <linux/grsecurity.h>
54769+
54770+static unsigned long alloc_stack_next = 1;
54771+static unsigned long alloc_stack_size = 1;
54772+static void **alloc_stack;
54773+
54774+static __inline__ int
54775+alloc_pop(void)
54776+{
54777+ if (alloc_stack_next == 1)
54778+ return 0;
54779+
54780+ kfree(alloc_stack[alloc_stack_next - 2]);
54781+
54782+ alloc_stack_next--;
54783+
54784+ return 1;
54785+}
54786+
54787+static __inline__ int
54788+alloc_push(void *buf)
54789+{
54790+ if (alloc_stack_next >= alloc_stack_size)
54791+ return 1;
54792+
54793+ alloc_stack[alloc_stack_next - 1] = buf;
54794+
54795+ alloc_stack_next++;
54796+
54797+ return 0;
54798+}
54799+
54800+void *
54801+acl_alloc(unsigned long len)
54802+{
54803+ void *ret = NULL;
54804+
54805+ if (!len || len > PAGE_SIZE)
54806+ goto out;
54807+
54808+ ret = kmalloc(len, GFP_KERNEL);
54809+
54810+ if (ret) {
54811+ if (alloc_push(ret)) {
54812+ kfree(ret);
54813+ ret = NULL;
54814+ }
54815+ }
54816+
54817+out:
54818+ return ret;
54819+}
54820+
54821+void *
54822+acl_alloc_num(unsigned long num, unsigned long len)
54823+{
54824+ if (!len || (num > (PAGE_SIZE / len)))
54825+ return NULL;
54826+
54827+ return acl_alloc(num * len);
54828+}
54829+
54830+void
54831+acl_free_all(void)
54832+{
54833+ if (gr_acl_is_enabled() || !alloc_stack)
54834+ return;
54835+
54836+ while (alloc_pop()) ;
54837+
54838+ if (alloc_stack) {
54839+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54840+ kfree(alloc_stack);
54841+ else
54842+ vfree(alloc_stack);
54843+ }
54844+
54845+ alloc_stack = NULL;
54846+ alloc_stack_size = 1;
54847+ alloc_stack_next = 1;
54848+
54849+ return;
54850+}
54851+
54852+int
54853+acl_alloc_stack_init(unsigned long size)
54854+{
54855+ if ((size * sizeof (void *)) <= PAGE_SIZE)
54856+ alloc_stack =
54857+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54858+ else
54859+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
54860+
54861+ alloc_stack_size = size;
54862+
54863+ if (!alloc_stack)
54864+ return 0;
54865+ else
54866+ return 1;
54867+}
54868diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54869new file mode 100644
54870index 0000000..6d21049
54871--- /dev/null
54872+++ b/grsecurity/gracl_cap.c
54873@@ -0,0 +1,110 @@
54874+#include <linux/kernel.h>
54875+#include <linux/module.h>
54876+#include <linux/sched.h>
54877+#include <linux/gracl.h>
54878+#include <linux/grsecurity.h>
54879+#include <linux/grinternal.h>
54880+
54881+extern const char *captab_log[];
54882+extern int captab_log_entries;
54883+
54884+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
54885+{
54886+ struct acl_subject_label *curracl;
54887+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54888+ kernel_cap_t cap_audit = __cap_empty_set;
54889+
54890+ if (!gr_acl_is_enabled())
54891+ return 1;
54892+
54893+ curracl = task->acl;
54894+
54895+ cap_drop = curracl->cap_lower;
54896+ cap_mask = curracl->cap_mask;
54897+ cap_audit = curracl->cap_invert_audit;
54898+
54899+ while ((curracl = curracl->parent_subject)) {
54900+ /* if the cap isn't specified in the current computed mask but is specified in the
54901+ current level subject, and is lowered in the current level subject, then add
54902+ it to the set of dropped capabilities
54903+ otherwise, add the current level subject's mask to the current computed mask
54904+ */
54905+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54906+ cap_raise(cap_mask, cap);
54907+ if (cap_raised(curracl->cap_lower, cap))
54908+ cap_raise(cap_drop, cap);
54909+ if (cap_raised(curracl->cap_invert_audit, cap))
54910+ cap_raise(cap_audit, cap);
54911+ }
54912+ }
54913+
54914+ if (!cap_raised(cap_drop, cap)) {
54915+ if (cap_raised(cap_audit, cap))
54916+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54917+ return 1;
54918+ }
54919+
54920+ curracl = task->acl;
54921+
54922+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54923+ && cap_raised(cred->cap_effective, cap)) {
54924+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54925+ task->role->roletype, cred->uid,
54926+ cred->gid, task->exec_file ?
54927+ gr_to_filename(task->exec_file->f_path.dentry,
54928+ task->exec_file->f_path.mnt) : curracl->filename,
54929+ curracl->filename, 0UL,
54930+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54931+ return 1;
54932+ }
54933+
54934+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54935+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54936+
54937+ return 0;
54938+}
54939+
54940+int
54941+gr_acl_is_capable(const int cap)
54942+{
54943+ return gr_task_acl_is_capable(current, current_cred(), cap);
54944+}
54945+
54946+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
54947+{
54948+ struct acl_subject_label *curracl;
54949+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54950+
54951+ if (!gr_acl_is_enabled())
54952+ return 1;
54953+
54954+ curracl = task->acl;
54955+
54956+ cap_drop = curracl->cap_lower;
54957+ cap_mask = curracl->cap_mask;
54958+
54959+ while ((curracl = curracl->parent_subject)) {
54960+ /* if the cap isn't specified in the current computed mask but is specified in the
54961+ current level subject, and is lowered in the current level subject, then add
54962+ it to the set of dropped capabilities
54963+ otherwise, add the current level subject's mask to the current computed mask
54964+ */
54965+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54966+ cap_raise(cap_mask, cap);
54967+ if (cap_raised(curracl->cap_lower, cap))
54968+ cap_raise(cap_drop, cap);
54969+ }
54970+ }
54971+
54972+ if (!cap_raised(cap_drop, cap))
54973+ return 1;
54974+
54975+ return 0;
54976+}
54977+
54978+int
54979+gr_acl_is_capable_nolog(const int cap)
54980+{
54981+ return gr_task_acl_is_capable_nolog(current, cap);
54982+}
54983+
54984diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
54985new file mode 100644
54986index 0000000..88d0e87
54987--- /dev/null
54988+++ b/grsecurity/gracl_fs.c
54989@@ -0,0 +1,435 @@
54990+#include <linux/kernel.h>
54991+#include <linux/sched.h>
54992+#include <linux/types.h>
54993+#include <linux/fs.h>
54994+#include <linux/file.h>
54995+#include <linux/stat.h>
54996+#include <linux/grsecurity.h>
54997+#include <linux/grinternal.h>
54998+#include <linux/gracl.h>
54999+
55000+umode_t
55001+gr_acl_umask(void)
55002+{
55003+ if (unlikely(!gr_acl_is_enabled()))
55004+ return 0;
55005+
55006+ return current->role->umask;
55007+}
55008+
55009+__u32
55010+gr_acl_handle_hidden_file(const struct dentry * dentry,
55011+ const struct vfsmount * mnt)
55012+{
55013+ __u32 mode;
55014+
55015+ if (unlikely(!dentry->d_inode))
55016+ return GR_FIND;
55017+
55018+ mode =
55019+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
55020+
55021+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
55022+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55023+ return mode;
55024+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
55025+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55026+ return 0;
55027+ } else if (unlikely(!(mode & GR_FIND)))
55028+ return 0;
55029+
55030+ return GR_FIND;
55031+}
55032+
55033+__u32
55034+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
55035+ int acc_mode)
55036+{
55037+ __u32 reqmode = GR_FIND;
55038+ __u32 mode;
55039+
55040+ if (unlikely(!dentry->d_inode))
55041+ return reqmode;
55042+
55043+ if (acc_mode & MAY_APPEND)
55044+ reqmode |= GR_APPEND;
55045+ else if (acc_mode & MAY_WRITE)
55046+ reqmode |= GR_WRITE;
55047+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
55048+ reqmode |= GR_READ;
55049+
55050+ mode =
55051+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55052+ mnt);
55053+
55054+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55055+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55056+ reqmode & GR_READ ? " reading" : "",
55057+ reqmode & GR_WRITE ? " writing" : reqmode &
55058+ GR_APPEND ? " appending" : "");
55059+ return reqmode;
55060+ } else
55061+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55062+ {
55063+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55064+ reqmode & GR_READ ? " reading" : "",
55065+ reqmode & GR_WRITE ? " writing" : reqmode &
55066+ GR_APPEND ? " appending" : "");
55067+ return 0;
55068+ } else if (unlikely((mode & reqmode) != reqmode))
55069+ return 0;
55070+
55071+ return reqmode;
55072+}
55073+
55074+__u32
55075+gr_acl_handle_creat(const struct dentry * dentry,
55076+ const struct dentry * p_dentry,
55077+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55078+ const int imode)
55079+{
55080+ __u32 reqmode = GR_WRITE | GR_CREATE;
55081+ __u32 mode;
55082+
55083+ if (acc_mode & MAY_APPEND)
55084+ reqmode |= GR_APPEND;
55085+ // if a directory was required or the directory already exists, then
55086+ // don't count this open as a read
55087+ if ((acc_mode & MAY_READ) &&
55088+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
55089+ reqmode |= GR_READ;
55090+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
55091+ reqmode |= GR_SETID;
55092+
55093+ mode =
55094+ gr_check_create(dentry, p_dentry, p_mnt,
55095+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55096+
55097+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55098+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55099+ reqmode & GR_READ ? " reading" : "",
55100+ reqmode & GR_WRITE ? " writing" : reqmode &
55101+ GR_APPEND ? " appending" : "");
55102+ return reqmode;
55103+ } else
55104+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55105+ {
55106+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55107+ reqmode & GR_READ ? " reading" : "",
55108+ reqmode & GR_WRITE ? " writing" : reqmode &
55109+ GR_APPEND ? " appending" : "");
55110+ return 0;
55111+ } else if (unlikely((mode & reqmode) != reqmode))
55112+ return 0;
55113+
55114+ return reqmode;
55115+}
55116+
55117+__u32
55118+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55119+ const int fmode)
55120+{
55121+ __u32 mode, reqmode = GR_FIND;
55122+
55123+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55124+ reqmode |= GR_EXEC;
55125+ if (fmode & S_IWOTH)
55126+ reqmode |= GR_WRITE;
55127+ if (fmode & S_IROTH)
55128+ reqmode |= GR_READ;
55129+
55130+ mode =
55131+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55132+ mnt);
55133+
55134+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55135+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55136+ reqmode & GR_READ ? " reading" : "",
55137+ reqmode & GR_WRITE ? " writing" : "",
55138+ reqmode & GR_EXEC ? " executing" : "");
55139+ return reqmode;
55140+ } else
55141+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55142+ {
55143+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55144+ reqmode & GR_READ ? " reading" : "",
55145+ reqmode & GR_WRITE ? " writing" : "",
55146+ reqmode & GR_EXEC ? " executing" : "");
55147+ return 0;
55148+ } else if (unlikely((mode & reqmode) != reqmode))
55149+ return 0;
55150+
55151+ return reqmode;
55152+}
55153+
55154+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55155+{
55156+ __u32 mode;
55157+
55158+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55159+
55160+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55161+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
55162+ return mode;
55163+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55164+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
55165+ return 0;
55166+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
55167+ return 0;
55168+
55169+ return (reqmode);
55170+}
55171+
55172+__u32
55173+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
55174+{
55175+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
55176+}
55177+
55178+__u32
55179+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
55180+{
55181+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
55182+}
55183+
55184+__u32
55185+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
55186+{
55187+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
55188+}
55189+
55190+__u32
55191+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
55192+{
55193+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
55194+}
55195+
55196+__u32
55197+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
55198+ umode_t *modeptr)
55199+{
55200+ umode_t mode;
55201+
55202+ *modeptr &= ~gr_acl_umask();
55203+ mode = *modeptr;
55204+
55205+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
55206+ return 1;
55207+
55208+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
55209+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55210+ GR_CHMOD_ACL_MSG);
55211+ } else {
55212+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
55213+ }
55214+}
55215+
55216+__u32
55217+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
55218+{
55219+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
55220+}
55221+
55222+__u32
55223+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
55224+{
55225+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
55226+}
55227+
55228+__u32
55229+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
55230+{
55231+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
55232+}
55233+
55234+__u32
55235+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
55236+{
55237+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
55238+ GR_UNIXCONNECT_ACL_MSG);
55239+}
55240+
55241+/* hardlinks require at minimum create and link permission,
55242+ any additional privilege required is based on the
55243+ privilege of the file being linked to
55244+*/
55245+__u32
55246+gr_acl_handle_link(const struct dentry * new_dentry,
55247+ const struct dentry * parent_dentry,
55248+ const struct vfsmount * parent_mnt,
55249+ const struct dentry * old_dentry,
55250+ const struct vfsmount * old_mnt, const char *to)
55251+{
55252+ __u32 mode;
55253+ __u32 needmode = GR_CREATE | GR_LINK;
55254+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
55255+
55256+ mode =
55257+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
55258+ old_mnt);
55259+
55260+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
55261+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55262+ return mode;
55263+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55264+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55265+ return 0;
55266+ } else if (unlikely((mode & needmode) != needmode))
55267+ return 0;
55268+
55269+ return 1;
55270+}
55271+
55272+__u32
55273+gr_acl_handle_symlink(const struct dentry * new_dentry,
55274+ const struct dentry * parent_dentry,
55275+ const struct vfsmount * parent_mnt, const char *from)
55276+{
55277+ __u32 needmode = GR_WRITE | GR_CREATE;
55278+ __u32 mode;
55279+
55280+ mode =
55281+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
55282+ GR_CREATE | GR_AUDIT_CREATE |
55283+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
55284+
55285+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
55286+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55287+ return mode;
55288+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55289+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55290+ return 0;
55291+ } else if (unlikely((mode & needmode) != needmode))
55292+ return 0;
55293+
55294+ return (GR_WRITE | GR_CREATE);
55295+}
55296+
55297+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
55298+{
55299+ __u32 mode;
55300+
55301+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55302+
55303+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55304+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
55305+ return mode;
55306+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55307+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
55308+ return 0;
55309+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
55310+ return 0;
55311+
55312+ return (reqmode);
55313+}
55314+
55315+__u32
55316+gr_acl_handle_mknod(const struct dentry * new_dentry,
55317+ const struct dentry * parent_dentry,
55318+ const struct vfsmount * parent_mnt,
55319+ const int mode)
55320+{
55321+ __u32 reqmode = GR_WRITE | GR_CREATE;
55322+ if (unlikely(mode & (S_ISUID | S_ISGID)))
55323+ reqmode |= GR_SETID;
55324+
55325+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55326+ reqmode, GR_MKNOD_ACL_MSG);
55327+}
55328+
55329+__u32
55330+gr_acl_handle_mkdir(const struct dentry *new_dentry,
55331+ const struct dentry *parent_dentry,
55332+ const struct vfsmount *parent_mnt)
55333+{
55334+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55335+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
55336+}
55337+
55338+#define RENAME_CHECK_SUCCESS(old, new) \
55339+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
55340+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
55341+
55342+int
55343+gr_acl_handle_rename(struct dentry *new_dentry,
55344+ struct dentry *parent_dentry,
55345+ const struct vfsmount *parent_mnt,
55346+ struct dentry *old_dentry,
55347+ struct inode *old_parent_inode,
55348+ struct vfsmount *old_mnt, const char *newname)
55349+{
55350+ __u32 comp1, comp2;
55351+ int error = 0;
55352+
55353+ if (unlikely(!gr_acl_is_enabled()))
55354+ return 0;
55355+
55356+ if (!new_dentry->d_inode) {
55357+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
55358+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
55359+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
55360+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
55361+ GR_DELETE | GR_AUDIT_DELETE |
55362+ GR_AUDIT_READ | GR_AUDIT_WRITE |
55363+ GR_SUPPRESS, old_mnt);
55364+ } else {
55365+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
55366+ GR_CREATE | GR_DELETE |
55367+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
55368+ GR_AUDIT_READ | GR_AUDIT_WRITE |
55369+ GR_SUPPRESS, parent_mnt);
55370+ comp2 =
55371+ gr_search_file(old_dentry,
55372+ GR_READ | GR_WRITE | GR_AUDIT_READ |
55373+ GR_DELETE | GR_AUDIT_DELETE |
55374+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
55375+ }
55376+
55377+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
55378+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
55379+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55380+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
55381+ && !(comp2 & GR_SUPPRESS)) {
55382+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55383+ error = -EACCES;
55384+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
55385+ error = -EACCES;
55386+
55387+ return error;
55388+}
55389+
55390+void
55391+gr_acl_handle_exit(void)
55392+{
55393+ u16 id;
55394+ char *rolename;
55395+ struct file *exec_file;
55396+
55397+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
55398+ !(current->role->roletype & GR_ROLE_PERSIST))) {
55399+ id = current->acl_role_id;
55400+ rolename = current->role->rolename;
55401+ gr_set_acls(1);
55402+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
55403+ }
55404+
55405+ write_lock(&grsec_exec_file_lock);
55406+ exec_file = current->exec_file;
55407+ current->exec_file = NULL;
55408+ write_unlock(&grsec_exec_file_lock);
55409+
55410+ if (exec_file)
55411+ fput(exec_file);
55412+}
55413+
55414+int
55415+gr_acl_handle_procpidmem(const struct task_struct *task)
55416+{
55417+ if (unlikely(!gr_acl_is_enabled()))
55418+ return 0;
55419+
55420+ if (task != current && task->acl->mode & GR_PROTPROCFD)
55421+ return -EACCES;
55422+
55423+ return 0;
55424+}
55425diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
55426new file mode 100644
55427index 0000000..58800a7
55428--- /dev/null
55429+++ b/grsecurity/gracl_ip.c
55430@@ -0,0 +1,384 @@
55431+#include <linux/kernel.h>
55432+#include <asm/uaccess.h>
55433+#include <asm/errno.h>
55434+#include <net/sock.h>
55435+#include <linux/file.h>
55436+#include <linux/fs.h>
55437+#include <linux/net.h>
55438+#include <linux/in.h>
55439+#include <linux/skbuff.h>
55440+#include <linux/ip.h>
55441+#include <linux/udp.h>
55442+#include <linux/types.h>
55443+#include <linux/sched.h>
55444+#include <linux/netdevice.h>
55445+#include <linux/inetdevice.h>
55446+#include <linux/gracl.h>
55447+#include <linux/grsecurity.h>
55448+#include <linux/grinternal.h>
55449+
55450+#define GR_BIND 0x01
55451+#define GR_CONNECT 0x02
55452+#define GR_INVERT 0x04
55453+#define GR_BINDOVERRIDE 0x08
55454+#define GR_CONNECTOVERRIDE 0x10
55455+#define GR_SOCK_FAMILY 0x20
55456+
55457+static const char * gr_protocols[IPPROTO_MAX] = {
55458+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
55459+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
55460+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
55461+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
55462+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
55463+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
55464+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
55465+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
55466+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
55467+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
55468+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
55469+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
55470+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
55471+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
55472+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
55473+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
55474+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
55475+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
55476+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
55477+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
55478+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
55479+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
55480+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
55481+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
55482+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
55483+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
55484+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
55485+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
55486+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
55487+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
55488+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
55489+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
55490+ };
55491+
55492+static const char * gr_socktypes[SOCK_MAX] = {
55493+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
55494+ "unknown:7", "unknown:8", "unknown:9", "packet"
55495+ };
55496+
55497+static const char * gr_sockfamilies[AF_MAX+1] = {
55498+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
55499+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
55500+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
55501+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
55502+ };
55503+
55504+const char *
55505+gr_proto_to_name(unsigned char proto)
55506+{
55507+ return gr_protocols[proto];
55508+}
55509+
55510+const char *
55511+gr_socktype_to_name(unsigned char type)
55512+{
55513+ return gr_socktypes[type];
55514+}
55515+
55516+const char *
55517+gr_sockfamily_to_name(unsigned char family)
55518+{
55519+ return gr_sockfamilies[family];
55520+}
55521+
55522+int
55523+gr_search_socket(const int domain, const int type, const int protocol)
55524+{
55525+ struct acl_subject_label *curr;
55526+ const struct cred *cred = current_cred();
55527+
55528+ if (unlikely(!gr_acl_is_enabled()))
55529+ goto exit;
55530+
55531+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
55532+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
55533+ goto exit; // let the kernel handle it
55534+
55535+ curr = current->acl;
55536+
55537+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
55538+ /* the family is allowed, if this is PF_INET allow it only if
55539+ the extra sock type/protocol checks pass */
55540+ if (domain == PF_INET)
55541+ goto inet_check;
55542+ goto exit;
55543+ } else {
55544+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55545+ __u32 fakeip = 0;
55546+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55547+ current->role->roletype, cred->uid,
55548+ cred->gid, current->exec_file ?
55549+ gr_to_filename(current->exec_file->f_path.dentry,
55550+ current->exec_file->f_path.mnt) :
55551+ curr->filename, curr->filename,
55552+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
55553+ &current->signal->saved_ip);
55554+ goto exit;
55555+ }
55556+ goto exit_fail;
55557+ }
55558+
55559+inet_check:
55560+ /* the rest of this checking is for IPv4 only */
55561+ if (!curr->ips)
55562+ goto exit;
55563+
55564+ if ((curr->ip_type & (1 << type)) &&
55565+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
55566+ goto exit;
55567+
55568+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55569+ /* we don't place acls on raw sockets , and sometimes
55570+ dgram/ip sockets are opened for ioctl and not
55571+ bind/connect, so we'll fake a bind learn log */
55572+ if (type == SOCK_RAW || type == SOCK_PACKET) {
55573+ __u32 fakeip = 0;
55574+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55575+ current->role->roletype, cred->uid,
55576+ cred->gid, current->exec_file ?
55577+ gr_to_filename(current->exec_file->f_path.dentry,
55578+ current->exec_file->f_path.mnt) :
55579+ curr->filename, curr->filename,
55580+ &fakeip, 0, type,
55581+ protocol, GR_CONNECT, &current->signal->saved_ip);
55582+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
55583+ __u32 fakeip = 0;
55584+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55585+ current->role->roletype, cred->uid,
55586+ cred->gid, current->exec_file ?
55587+ gr_to_filename(current->exec_file->f_path.dentry,
55588+ current->exec_file->f_path.mnt) :
55589+ curr->filename, curr->filename,
55590+ &fakeip, 0, type,
55591+ protocol, GR_BIND, &current->signal->saved_ip);
55592+ }
55593+ /* we'll log when they use connect or bind */
55594+ goto exit;
55595+ }
55596+
55597+exit_fail:
55598+ if (domain == PF_INET)
55599+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
55600+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
55601+ else
55602+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
55603+ gr_socktype_to_name(type), protocol);
55604+
55605+ return 0;
55606+exit:
55607+ return 1;
55608+}
55609+
55610+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
55611+{
55612+ if ((ip->mode & mode) &&
55613+ (ip_port >= ip->low) &&
55614+ (ip_port <= ip->high) &&
55615+ ((ntohl(ip_addr) & our_netmask) ==
55616+ (ntohl(our_addr) & our_netmask))
55617+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
55618+ && (ip->type & (1 << type))) {
55619+ if (ip->mode & GR_INVERT)
55620+ return 2; // specifically denied
55621+ else
55622+ return 1; // allowed
55623+ }
55624+
55625+ return 0; // not specifically allowed, may continue parsing
55626+}
55627+
55628+static int
55629+gr_search_connectbind(const int full_mode, struct sock *sk,
55630+ struct sockaddr_in *addr, const int type)
55631+{
55632+ char iface[IFNAMSIZ] = {0};
55633+ struct acl_subject_label *curr;
55634+ struct acl_ip_label *ip;
55635+ struct inet_sock *isk;
55636+ struct net_device *dev;
55637+ struct in_device *idev;
55638+ unsigned long i;
55639+ int ret;
55640+ int mode = full_mode & (GR_BIND | GR_CONNECT);
55641+ __u32 ip_addr = 0;
55642+ __u32 our_addr;
55643+ __u32 our_netmask;
55644+ char *p;
55645+ __u16 ip_port = 0;
55646+ const struct cred *cred = current_cred();
55647+
55648+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55649+ return 0;
55650+
55651+ curr = current->acl;
55652+ isk = inet_sk(sk);
55653+
55654+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55655+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55656+ addr->sin_addr.s_addr = curr->inaddr_any_override;
55657+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
55658+ struct sockaddr_in saddr;
55659+ int err;
55660+
55661+ saddr.sin_family = AF_INET;
55662+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
55663+ saddr.sin_port = isk->inet_sport;
55664+
55665+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55666+ if (err)
55667+ return err;
55668+
55669+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55670+ if (err)
55671+ return err;
55672+ }
55673+
55674+ if (!curr->ips)
55675+ return 0;
55676+
55677+ ip_addr = addr->sin_addr.s_addr;
55678+ ip_port = ntohs(addr->sin_port);
55679+
55680+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55681+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55682+ current->role->roletype, cred->uid,
55683+ cred->gid, current->exec_file ?
55684+ gr_to_filename(current->exec_file->f_path.dentry,
55685+ current->exec_file->f_path.mnt) :
55686+ curr->filename, curr->filename,
55687+ &ip_addr, ip_port, type,
55688+ sk->sk_protocol, mode, &current->signal->saved_ip);
55689+ return 0;
55690+ }
55691+
55692+ for (i = 0; i < curr->ip_num; i++) {
55693+ ip = *(curr->ips + i);
55694+ if (ip->iface != NULL) {
55695+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
55696+ p = strchr(iface, ':');
55697+ if (p != NULL)
55698+ *p = '\0';
55699+ dev = dev_get_by_name(sock_net(sk), iface);
55700+ if (dev == NULL)
55701+ continue;
55702+ idev = in_dev_get(dev);
55703+ if (idev == NULL) {
55704+ dev_put(dev);
55705+ continue;
55706+ }
55707+ rcu_read_lock();
55708+ for_ifa(idev) {
55709+ if (!strcmp(ip->iface, ifa->ifa_label)) {
55710+ our_addr = ifa->ifa_address;
55711+ our_netmask = 0xffffffff;
55712+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55713+ if (ret == 1) {
55714+ rcu_read_unlock();
55715+ in_dev_put(idev);
55716+ dev_put(dev);
55717+ return 0;
55718+ } else if (ret == 2) {
55719+ rcu_read_unlock();
55720+ in_dev_put(idev);
55721+ dev_put(dev);
55722+ goto denied;
55723+ }
55724+ }
55725+ } endfor_ifa(idev);
55726+ rcu_read_unlock();
55727+ in_dev_put(idev);
55728+ dev_put(dev);
55729+ } else {
55730+ our_addr = ip->addr;
55731+ our_netmask = ip->netmask;
55732+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55733+ if (ret == 1)
55734+ return 0;
55735+ else if (ret == 2)
55736+ goto denied;
55737+ }
55738+ }
55739+
55740+denied:
55741+ if (mode == GR_BIND)
55742+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55743+ else if (mode == GR_CONNECT)
55744+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55745+
55746+ return -EACCES;
55747+}
55748+
55749+int
55750+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55751+{
55752+ /* always allow disconnection of dgram sockets with connect */
55753+ if (addr->sin_family == AF_UNSPEC)
55754+ return 0;
55755+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55756+}
55757+
55758+int
55759+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55760+{
55761+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55762+}
55763+
55764+int gr_search_listen(struct socket *sock)
55765+{
55766+ struct sock *sk = sock->sk;
55767+ struct sockaddr_in addr;
55768+
55769+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55770+ addr.sin_port = inet_sk(sk)->inet_sport;
55771+
55772+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55773+}
55774+
55775+int gr_search_accept(struct socket *sock)
55776+{
55777+ struct sock *sk = sock->sk;
55778+ struct sockaddr_in addr;
55779+
55780+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55781+ addr.sin_port = inet_sk(sk)->inet_sport;
55782+
55783+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55784+}
55785+
55786+int
55787+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55788+{
55789+ if (addr)
55790+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55791+ else {
55792+ struct sockaddr_in sin;
55793+ const struct inet_sock *inet = inet_sk(sk);
55794+
55795+ sin.sin_addr.s_addr = inet->inet_daddr;
55796+ sin.sin_port = inet->inet_dport;
55797+
55798+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55799+ }
55800+}
55801+
55802+int
55803+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55804+{
55805+ struct sockaddr_in sin;
55806+
55807+ if (unlikely(skb->len < sizeof (struct udphdr)))
55808+ return 0; // skip this packet
55809+
55810+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55811+ sin.sin_port = udp_hdr(skb)->source;
55812+
55813+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55814+}
55815diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55816new file mode 100644
55817index 0000000..25f54ef
55818--- /dev/null
55819+++ b/grsecurity/gracl_learn.c
55820@@ -0,0 +1,207 @@
55821+#include <linux/kernel.h>
55822+#include <linux/mm.h>
55823+#include <linux/sched.h>
55824+#include <linux/poll.h>
55825+#include <linux/string.h>
55826+#include <linux/file.h>
55827+#include <linux/types.h>
55828+#include <linux/vmalloc.h>
55829+#include <linux/grinternal.h>
55830+
55831+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55832+ size_t count, loff_t *ppos);
55833+extern int gr_acl_is_enabled(void);
55834+
55835+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55836+static int gr_learn_attached;
55837+
55838+/* use a 512k buffer */
55839+#define LEARN_BUFFER_SIZE (512 * 1024)
55840+
55841+static DEFINE_SPINLOCK(gr_learn_lock);
55842+static DEFINE_MUTEX(gr_learn_user_mutex);
55843+
55844+/* we need to maintain two buffers, so that the kernel context of grlearn
55845+ uses a semaphore around the userspace copying, and the other kernel contexts
55846+ use a spinlock when copying into the buffer, since they cannot sleep
55847+*/
55848+static char *learn_buffer;
55849+static char *learn_buffer_user;
55850+static int learn_buffer_len;
55851+static int learn_buffer_user_len;
55852+
55853+static ssize_t
55854+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55855+{
55856+ DECLARE_WAITQUEUE(wait, current);
55857+ ssize_t retval = 0;
55858+
55859+ add_wait_queue(&learn_wait, &wait);
55860+ set_current_state(TASK_INTERRUPTIBLE);
55861+ do {
55862+ mutex_lock(&gr_learn_user_mutex);
55863+ spin_lock(&gr_learn_lock);
55864+ if (learn_buffer_len)
55865+ break;
55866+ spin_unlock(&gr_learn_lock);
55867+ mutex_unlock(&gr_learn_user_mutex);
55868+ if (file->f_flags & O_NONBLOCK) {
55869+ retval = -EAGAIN;
55870+ goto out;
55871+ }
55872+ if (signal_pending(current)) {
55873+ retval = -ERESTARTSYS;
55874+ goto out;
55875+ }
55876+
55877+ schedule();
55878+ } while (1);
55879+
55880+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55881+ learn_buffer_user_len = learn_buffer_len;
55882+ retval = learn_buffer_len;
55883+ learn_buffer_len = 0;
55884+
55885+ spin_unlock(&gr_learn_lock);
55886+
55887+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55888+ retval = -EFAULT;
55889+
55890+ mutex_unlock(&gr_learn_user_mutex);
55891+out:
55892+ set_current_state(TASK_RUNNING);
55893+ remove_wait_queue(&learn_wait, &wait);
55894+ return retval;
55895+}
55896+
55897+static unsigned int
55898+poll_learn(struct file * file, poll_table * wait)
55899+{
55900+ poll_wait(file, &learn_wait, wait);
55901+
55902+ if (learn_buffer_len)
55903+ return (POLLIN | POLLRDNORM);
55904+
55905+ return 0;
55906+}
55907+
55908+void
55909+gr_clear_learn_entries(void)
55910+{
55911+ char *tmp;
55912+
55913+ mutex_lock(&gr_learn_user_mutex);
55914+ spin_lock(&gr_learn_lock);
55915+ tmp = learn_buffer;
55916+ learn_buffer = NULL;
55917+ spin_unlock(&gr_learn_lock);
55918+ if (tmp)
55919+ vfree(tmp);
55920+ if (learn_buffer_user != NULL) {
55921+ vfree(learn_buffer_user);
55922+ learn_buffer_user = NULL;
55923+ }
55924+ learn_buffer_len = 0;
55925+ mutex_unlock(&gr_learn_user_mutex);
55926+
55927+ return;
55928+}
55929+
55930+void
55931+gr_add_learn_entry(const char *fmt, ...)
55932+{
55933+ va_list args;
55934+ unsigned int len;
55935+
55936+ if (!gr_learn_attached)
55937+ return;
55938+
55939+ spin_lock(&gr_learn_lock);
55940+
55941+ /* leave a gap at the end so we know when it's "full" but don't have to
55942+ compute the exact length of the string we're trying to append
55943+ */
55944+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55945+ spin_unlock(&gr_learn_lock);
55946+ wake_up_interruptible(&learn_wait);
55947+ return;
55948+ }
55949+ if (learn_buffer == NULL) {
55950+ spin_unlock(&gr_learn_lock);
55951+ return;
55952+ }
55953+
55954+ va_start(args, fmt);
55955+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55956+ va_end(args);
55957+
55958+ learn_buffer_len += len + 1;
55959+
55960+ spin_unlock(&gr_learn_lock);
55961+ wake_up_interruptible(&learn_wait);
55962+
55963+ return;
55964+}
55965+
55966+static int
55967+open_learn(struct inode *inode, struct file *file)
55968+{
55969+ if (file->f_mode & FMODE_READ && gr_learn_attached)
55970+ return -EBUSY;
55971+ if (file->f_mode & FMODE_READ) {
55972+ int retval = 0;
55973+ mutex_lock(&gr_learn_user_mutex);
55974+ if (learn_buffer == NULL)
55975+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55976+ if (learn_buffer_user == NULL)
55977+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55978+ if (learn_buffer == NULL) {
55979+ retval = -ENOMEM;
55980+ goto out_error;
55981+ }
55982+ if (learn_buffer_user == NULL) {
55983+ retval = -ENOMEM;
55984+ goto out_error;
55985+ }
55986+ learn_buffer_len = 0;
55987+ learn_buffer_user_len = 0;
55988+ gr_learn_attached = 1;
55989+out_error:
55990+ mutex_unlock(&gr_learn_user_mutex);
55991+ return retval;
55992+ }
55993+ return 0;
55994+}
55995+
55996+static int
55997+close_learn(struct inode *inode, struct file *file)
55998+{
55999+ if (file->f_mode & FMODE_READ) {
56000+ char *tmp = NULL;
56001+ mutex_lock(&gr_learn_user_mutex);
56002+ spin_lock(&gr_learn_lock);
56003+ tmp = learn_buffer;
56004+ learn_buffer = NULL;
56005+ spin_unlock(&gr_learn_lock);
56006+ if (tmp)
56007+ vfree(tmp);
56008+ if (learn_buffer_user != NULL) {
56009+ vfree(learn_buffer_user);
56010+ learn_buffer_user = NULL;
56011+ }
56012+ learn_buffer_len = 0;
56013+ learn_buffer_user_len = 0;
56014+ gr_learn_attached = 0;
56015+ mutex_unlock(&gr_learn_user_mutex);
56016+ }
56017+
56018+ return 0;
56019+}
56020+
56021+const struct file_operations grsec_fops = {
56022+ .read = read_learn,
56023+ .write = write_grsec_handler,
56024+ .open = open_learn,
56025+ .release = close_learn,
56026+ .poll = poll_learn,
56027+};
56028diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
56029new file mode 100644
56030index 0000000..39645c9
56031--- /dev/null
56032+++ b/grsecurity/gracl_res.c
56033@@ -0,0 +1,68 @@
56034+#include <linux/kernel.h>
56035+#include <linux/sched.h>
56036+#include <linux/gracl.h>
56037+#include <linux/grinternal.h>
56038+
56039+static const char *restab_log[] = {
56040+ [RLIMIT_CPU] = "RLIMIT_CPU",
56041+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
56042+ [RLIMIT_DATA] = "RLIMIT_DATA",
56043+ [RLIMIT_STACK] = "RLIMIT_STACK",
56044+ [RLIMIT_CORE] = "RLIMIT_CORE",
56045+ [RLIMIT_RSS] = "RLIMIT_RSS",
56046+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
56047+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
56048+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
56049+ [RLIMIT_AS] = "RLIMIT_AS",
56050+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
56051+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
56052+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
56053+ [RLIMIT_NICE] = "RLIMIT_NICE",
56054+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
56055+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
56056+ [GR_CRASH_RES] = "RLIMIT_CRASH"
56057+};
56058+
56059+void
56060+gr_log_resource(const struct task_struct *task,
56061+ const int res, const unsigned long wanted, const int gt)
56062+{
56063+ const struct cred *cred;
56064+ unsigned long rlim;
56065+
56066+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
56067+ return;
56068+
56069+ // not yet supported resource
56070+ if (unlikely(!restab_log[res]))
56071+ return;
56072+
56073+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
56074+ rlim = task_rlimit_max(task, res);
56075+ else
56076+ rlim = task_rlimit(task, res);
56077+
56078+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
56079+ return;
56080+
56081+ rcu_read_lock();
56082+ cred = __task_cred(task);
56083+
56084+ if (res == RLIMIT_NPROC &&
56085+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
56086+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
56087+ goto out_rcu_unlock;
56088+ else if (res == RLIMIT_MEMLOCK &&
56089+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
56090+ goto out_rcu_unlock;
56091+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
56092+ goto out_rcu_unlock;
56093+ rcu_read_unlock();
56094+
56095+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
56096+
56097+ return;
56098+out_rcu_unlock:
56099+ rcu_read_unlock();
56100+ return;
56101+}
56102diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
56103new file mode 100644
56104index 0000000..5556be3
56105--- /dev/null
56106+++ b/grsecurity/gracl_segv.c
56107@@ -0,0 +1,299 @@
56108+#include <linux/kernel.h>
56109+#include <linux/mm.h>
56110+#include <asm/uaccess.h>
56111+#include <asm/errno.h>
56112+#include <asm/mman.h>
56113+#include <net/sock.h>
56114+#include <linux/file.h>
56115+#include <linux/fs.h>
56116+#include <linux/net.h>
56117+#include <linux/in.h>
56118+#include <linux/slab.h>
56119+#include <linux/types.h>
56120+#include <linux/sched.h>
56121+#include <linux/timer.h>
56122+#include <linux/gracl.h>
56123+#include <linux/grsecurity.h>
56124+#include <linux/grinternal.h>
56125+
56126+static struct crash_uid *uid_set;
56127+static unsigned short uid_used;
56128+static DEFINE_SPINLOCK(gr_uid_lock);
56129+extern rwlock_t gr_inode_lock;
56130+extern struct acl_subject_label *
56131+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56132+ struct acl_role_label *role);
56133+
56134+#ifdef CONFIG_BTRFS_FS
56135+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56136+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56137+#endif
56138+
56139+static inline dev_t __get_dev(const struct dentry *dentry)
56140+{
56141+#ifdef CONFIG_BTRFS_FS
56142+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56143+ return get_btrfs_dev_from_inode(dentry->d_inode);
56144+ else
56145+#endif
56146+ return dentry->d_inode->i_sb->s_dev;
56147+}
56148+
56149+int
56150+gr_init_uidset(void)
56151+{
56152+ uid_set =
56153+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56154+ uid_used = 0;
56155+
56156+ return uid_set ? 1 : 0;
56157+}
56158+
56159+void
56160+gr_free_uidset(void)
56161+{
56162+ if (uid_set)
56163+ kfree(uid_set);
56164+
56165+ return;
56166+}
56167+
56168+int
56169+gr_find_uid(const uid_t uid)
56170+{
56171+ struct crash_uid *tmp = uid_set;
56172+ uid_t buid;
56173+ int low = 0, high = uid_used - 1, mid;
56174+
56175+ while (high >= low) {
56176+ mid = (low + high) >> 1;
56177+ buid = tmp[mid].uid;
56178+ if (buid == uid)
56179+ return mid;
56180+ if (buid > uid)
56181+ high = mid - 1;
56182+ if (buid < uid)
56183+ low = mid + 1;
56184+ }
56185+
56186+ return -1;
56187+}
56188+
56189+static __inline__ void
56190+gr_insertsort(void)
56191+{
56192+ unsigned short i, j;
56193+ struct crash_uid index;
56194+
56195+ for (i = 1; i < uid_used; i++) {
56196+ index = uid_set[i];
56197+ j = i;
56198+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
56199+ uid_set[j] = uid_set[j - 1];
56200+ j--;
56201+ }
56202+ uid_set[j] = index;
56203+ }
56204+
56205+ return;
56206+}
56207+
56208+static __inline__ void
56209+gr_insert_uid(const uid_t uid, const unsigned long expires)
56210+{
56211+ int loc;
56212+
56213+ if (uid_used == GR_UIDTABLE_MAX)
56214+ return;
56215+
56216+ loc = gr_find_uid(uid);
56217+
56218+ if (loc >= 0) {
56219+ uid_set[loc].expires = expires;
56220+ return;
56221+ }
56222+
56223+ uid_set[uid_used].uid = uid;
56224+ uid_set[uid_used].expires = expires;
56225+ uid_used++;
56226+
56227+ gr_insertsort();
56228+
56229+ return;
56230+}
56231+
56232+void
56233+gr_remove_uid(const unsigned short loc)
56234+{
56235+ unsigned short i;
56236+
56237+ for (i = loc + 1; i < uid_used; i++)
56238+ uid_set[i - 1] = uid_set[i];
56239+
56240+ uid_used--;
56241+
56242+ return;
56243+}
56244+
56245+int
56246+gr_check_crash_uid(const uid_t uid)
56247+{
56248+ int loc;
56249+ int ret = 0;
56250+
56251+ if (unlikely(!gr_acl_is_enabled()))
56252+ return 0;
56253+
56254+ spin_lock(&gr_uid_lock);
56255+ loc = gr_find_uid(uid);
56256+
56257+ if (loc < 0)
56258+ goto out_unlock;
56259+
56260+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
56261+ gr_remove_uid(loc);
56262+ else
56263+ ret = 1;
56264+
56265+out_unlock:
56266+ spin_unlock(&gr_uid_lock);
56267+ return ret;
56268+}
56269+
56270+static __inline__ int
56271+proc_is_setxid(const struct cred *cred)
56272+{
56273+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
56274+ cred->uid != cred->fsuid)
56275+ return 1;
56276+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
56277+ cred->gid != cred->fsgid)
56278+ return 1;
56279+
56280+ return 0;
56281+}
56282+
56283+extern int gr_fake_force_sig(int sig, struct task_struct *t);
56284+
56285+void
56286+gr_handle_crash(struct task_struct *task, const int sig)
56287+{
56288+ struct acl_subject_label *curr;
56289+ struct task_struct *tsk, *tsk2;
56290+ const struct cred *cred;
56291+ const struct cred *cred2;
56292+
56293+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
56294+ return;
56295+
56296+ if (unlikely(!gr_acl_is_enabled()))
56297+ return;
56298+
56299+ curr = task->acl;
56300+
56301+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
56302+ return;
56303+
56304+ if (time_before_eq(curr->expires, get_seconds())) {
56305+ curr->expires = 0;
56306+ curr->crashes = 0;
56307+ }
56308+
56309+ curr->crashes++;
56310+
56311+ if (!curr->expires)
56312+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
56313+
56314+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56315+ time_after(curr->expires, get_seconds())) {
56316+ rcu_read_lock();
56317+ cred = __task_cred(task);
56318+ if (cred->uid && proc_is_setxid(cred)) {
56319+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56320+ spin_lock(&gr_uid_lock);
56321+ gr_insert_uid(cred->uid, curr->expires);
56322+ spin_unlock(&gr_uid_lock);
56323+ curr->expires = 0;
56324+ curr->crashes = 0;
56325+ read_lock(&tasklist_lock);
56326+ do_each_thread(tsk2, tsk) {
56327+ cred2 = __task_cred(tsk);
56328+ if (tsk != task && cred2->uid == cred->uid)
56329+ gr_fake_force_sig(SIGKILL, tsk);
56330+ } while_each_thread(tsk2, tsk);
56331+ read_unlock(&tasklist_lock);
56332+ } else {
56333+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56334+ read_lock(&tasklist_lock);
56335+ read_lock(&grsec_exec_file_lock);
56336+ do_each_thread(tsk2, tsk) {
56337+ if (likely(tsk != task)) {
56338+ // if this thread has the same subject as the one that triggered
56339+ // RES_CRASH and it's the same binary, kill it
56340+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
56341+ gr_fake_force_sig(SIGKILL, tsk);
56342+ }
56343+ } while_each_thread(tsk2, tsk);
56344+ read_unlock(&grsec_exec_file_lock);
56345+ read_unlock(&tasklist_lock);
56346+ }
56347+ rcu_read_unlock();
56348+ }
56349+
56350+ return;
56351+}
56352+
56353+int
56354+gr_check_crash_exec(const struct file *filp)
56355+{
56356+ struct acl_subject_label *curr;
56357+
56358+ if (unlikely(!gr_acl_is_enabled()))
56359+ return 0;
56360+
56361+ read_lock(&gr_inode_lock);
56362+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
56363+ __get_dev(filp->f_path.dentry),
56364+ current->role);
56365+ read_unlock(&gr_inode_lock);
56366+
56367+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
56368+ (!curr->crashes && !curr->expires))
56369+ return 0;
56370+
56371+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56372+ time_after(curr->expires, get_seconds()))
56373+ return 1;
56374+ else if (time_before_eq(curr->expires, get_seconds())) {
56375+ curr->crashes = 0;
56376+ curr->expires = 0;
56377+ }
56378+
56379+ return 0;
56380+}
56381+
56382+void
56383+gr_handle_alertkill(struct task_struct *task)
56384+{
56385+ struct acl_subject_label *curracl;
56386+ __u32 curr_ip;
56387+ struct task_struct *p, *p2;
56388+
56389+ if (unlikely(!gr_acl_is_enabled()))
56390+ return;
56391+
56392+ curracl = task->acl;
56393+ curr_ip = task->signal->curr_ip;
56394+
56395+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
56396+ read_lock(&tasklist_lock);
56397+ do_each_thread(p2, p) {
56398+ if (p->signal->curr_ip == curr_ip)
56399+ gr_fake_force_sig(SIGKILL, p);
56400+ } while_each_thread(p2, p);
56401+ read_unlock(&tasklist_lock);
56402+ } else if (curracl->mode & GR_KILLPROC)
56403+ gr_fake_force_sig(SIGKILL, task);
56404+
56405+ return;
56406+}
56407diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
56408new file mode 100644
56409index 0000000..9d83a69
56410--- /dev/null
56411+++ b/grsecurity/gracl_shm.c
56412@@ -0,0 +1,40 @@
56413+#include <linux/kernel.h>
56414+#include <linux/mm.h>
56415+#include <linux/sched.h>
56416+#include <linux/file.h>
56417+#include <linux/ipc.h>
56418+#include <linux/gracl.h>
56419+#include <linux/grsecurity.h>
56420+#include <linux/grinternal.h>
56421+
56422+int
56423+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56424+ const time_t shm_createtime, const uid_t cuid, const int shmid)
56425+{
56426+ struct task_struct *task;
56427+
56428+ if (!gr_acl_is_enabled())
56429+ return 1;
56430+
56431+ rcu_read_lock();
56432+ read_lock(&tasklist_lock);
56433+
56434+ task = find_task_by_vpid(shm_cprid);
56435+
56436+ if (unlikely(!task))
56437+ task = find_task_by_vpid(shm_lapid);
56438+
56439+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
56440+ (task->pid == shm_lapid)) &&
56441+ (task->acl->mode & GR_PROTSHM) &&
56442+ (task->acl != current->acl))) {
56443+ read_unlock(&tasklist_lock);
56444+ rcu_read_unlock();
56445+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
56446+ return 0;
56447+ }
56448+ read_unlock(&tasklist_lock);
56449+ rcu_read_unlock();
56450+
56451+ return 1;
56452+}
56453diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
56454new file mode 100644
56455index 0000000..bc0be01
56456--- /dev/null
56457+++ b/grsecurity/grsec_chdir.c
56458@@ -0,0 +1,19 @@
56459+#include <linux/kernel.h>
56460+#include <linux/sched.h>
56461+#include <linux/fs.h>
56462+#include <linux/file.h>
56463+#include <linux/grsecurity.h>
56464+#include <linux/grinternal.h>
56465+
56466+void
56467+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
56468+{
56469+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56470+ if ((grsec_enable_chdir && grsec_enable_group &&
56471+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
56472+ !grsec_enable_group)) {
56473+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
56474+ }
56475+#endif
56476+ return;
56477+}
56478diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
56479new file mode 100644
56480index 0000000..9807ee2
56481--- /dev/null
56482+++ b/grsecurity/grsec_chroot.c
56483@@ -0,0 +1,368 @@
56484+#include <linux/kernel.h>
56485+#include <linux/module.h>
56486+#include <linux/sched.h>
56487+#include <linux/file.h>
56488+#include <linux/fs.h>
56489+#include <linux/mount.h>
56490+#include <linux/types.h>
56491+#include "../fs/mount.h"
56492+#include <linux/grsecurity.h>
56493+#include <linux/grinternal.h>
56494+
56495+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
56496+{
56497+#ifdef CONFIG_GRKERNSEC
56498+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
56499+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
56500+ task->gr_is_chrooted = 1;
56501+ else
56502+ task->gr_is_chrooted = 0;
56503+
56504+ task->gr_chroot_dentry = path->dentry;
56505+#endif
56506+ return;
56507+}
56508+
56509+void gr_clear_chroot_entries(struct task_struct *task)
56510+{
56511+#ifdef CONFIG_GRKERNSEC
56512+ task->gr_is_chrooted = 0;
56513+ task->gr_chroot_dentry = NULL;
56514+#endif
56515+ return;
56516+}
56517+
56518+int
56519+gr_handle_chroot_unix(const pid_t pid)
56520+{
56521+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56522+ struct task_struct *p;
56523+
56524+ if (unlikely(!grsec_enable_chroot_unix))
56525+ return 1;
56526+
56527+ if (likely(!proc_is_chrooted(current)))
56528+ return 1;
56529+
56530+ rcu_read_lock();
56531+ read_lock(&tasklist_lock);
56532+ p = find_task_by_vpid_unrestricted(pid);
56533+ if (unlikely(p && !have_same_root(current, p))) {
56534+ read_unlock(&tasklist_lock);
56535+ rcu_read_unlock();
56536+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
56537+ return 0;
56538+ }
56539+ read_unlock(&tasklist_lock);
56540+ rcu_read_unlock();
56541+#endif
56542+ return 1;
56543+}
56544+
56545+int
56546+gr_handle_chroot_nice(void)
56547+{
56548+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56549+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
56550+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
56551+ return -EPERM;
56552+ }
56553+#endif
56554+ return 0;
56555+}
56556+
56557+int
56558+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
56559+{
56560+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56561+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
56562+ && proc_is_chrooted(current)) {
56563+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
56564+ return -EACCES;
56565+ }
56566+#endif
56567+ return 0;
56568+}
56569+
56570+int
56571+gr_handle_chroot_rawio(const struct inode *inode)
56572+{
56573+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56574+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56575+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
56576+ return 1;
56577+#endif
56578+ return 0;
56579+}
56580+
56581+int
56582+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
56583+{
56584+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56585+ struct task_struct *p;
56586+ int ret = 0;
56587+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
56588+ return ret;
56589+
56590+ read_lock(&tasklist_lock);
56591+ do_each_pid_task(pid, type, p) {
56592+ if (!have_same_root(current, p)) {
56593+ ret = 1;
56594+ goto out;
56595+ }
56596+ } while_each_pid_task(pid, type, p);
56597+out:
56598+ read_unlock(&tasklist_lock);
56599+ return ret;
56600+#endif
56601+ return 0;
56602+}
56603+
56604+int
56605+gr_pid_is_chrooted(struct task_struct *p)
56606+{
56607+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56608+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
56609+ return 0;
56610+
56611+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
56612+ !have_same_root(current, p)) {
56613+ return 1;
56614+ }
56615+#endif
56616+ return 0;
56617+}
56618+
56619+EXPORT_SYMBOL(gr_pid_is_chrooted);
56620+
56621+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
56622+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
56623+{
56624+ struct path path, currentroot;
56625+ int ret = 0;
56626+
56627+ path.dentry = (struct dentry *)u_dentry;
56628+ path.mnt = (struct vfsmount *)u_mnt;
56629+ get_fs_root(current->fs, &currentroot);
56630+ if (path_is_under(&path, &currentroot))
56631+ ret = 1;
56632+ path_put(&currentroot);
56633+
56634+ return ret;
56635+}
56636+#endif
56637+
56638+int
56639+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
56640+{
56641+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56642+ if (!grsec_enable_chroot_fchdir)
56643+ return 1;
56644+
56645+ if (!proc_is_chrooted(current))
56646+ return 1;
56647+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56648+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56649+ return 0;
56650+ }
56651+#endif
56652+ return 1;
56653+}
56654+
56655+int
56656+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56657+ const time_t shm_createtime)
56658+{
56659+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56660+ struct task_struct *p;
56661+ time_t starttime;
56662+
56663+ if (unlikely(!grsec_enable_chroot_shmat))
56664+ return 1;
56665+
56666+ if (likely(!proc_is_chrooted(current)))
56667+ return 1;
56668+
56669+ rcu_read_lock();
56670+ read_lock(&tasklist_lock);
56671+
56672+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
56673+ starttime = p->start_time.tv_sec;
56674+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56675+ if (have_same_root(current, p)) {
56676+ goto allow;
56677+ } else {
56678+ read_unlock(&tasklist_lock);
56679+ rcu_read_unlock();
56680+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56681+ return 0;
56682+ }
56683+ }
56684+ /* creator exited, pid reuse, fall through to next check */
56685+ }
56686+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56687+ if (unlikely(!have_same_root(current, p))) {
56688+ read_unlock(&tasklist_lock);
56689+ rcu_read_unlock();
56690+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56691+ return 0;
56692+ }
56693+ }
56694+
56695+allow:
56696+ read_unlock(&tasklist_lock);
56697+ rcu_read_unlock();
56698+#endif
56699+ return 1;
56700+}
56701+
56702+void
56703+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56704+{
56705+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56706+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56707+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56708+#endif
56709+ return;
56710+}
56711+
56712+int
56713+gr_handle_chroot_mknod(const struct dentry *dentry,
56714+ const struct vfsmount *mnt, const int mode)
56715+{
56716+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56717+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56718+ proc_is_chrooted(current)) {
56719+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56720+ return -EPERM;
56721+ }
56722+#endif
56723+ return 0;
56724+}
56725+
56726+int
56727+gr_handle_chroot_mount(const struct dentry *dentry,
56728+ const struct vfsmount *mnt, const char *dev_name)
56729+{
56730+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56731+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56732+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56733+ return -EPERM;
56734+ }
56735+#endif
56736+ return 0;
56737+}
56738+
56739+int
56740+gr_handle_chroot_pivot(void)
56741+{
56742+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56743+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56744+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56745+ return -EPERM;
56746+ }
56747+#endif
56748+ return 0;
56749+}
56750+
56751+int
56752+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56753+{
56754+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56755+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56756+ !gr_is_outside_chroot(dentry, mnt)) {
56757+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56758+ return -EPERM;
56759+ }
56760+#endif
56761+ return 0;
56762+}
56763+
56764+extern const char *captab_log[];
56765+extern int captab_log_entries;
56766+
56767+int
56768+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56769+{
56770+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56771+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56772+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56773+ if (cap_raised(chroot_caps, cap)) {
56774+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
56775+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
56776+ }
56777+ return 0;
56778+ }
56779+ }
56780+#endif
56781+ return 1;
56782+}
56783+
56784+int
56785+gr_chroot_is_capable(const int cap)
56786+{
56787+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56788+ return gr_task_chroot_is_capable(current, current_cred(), cap);
56789+#endif
56790+ return 1;
56791+}
56792+
56793+int
56794+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
56795+{
56796+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56797+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56798+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56799+ if (cap_raised(chroot_caps, cap)) {
56800+ return 0;
56801+ }
56802+ }
56803+#endif
56804+ return 1;
56805+}
56806+
56807+int
56808+gr_chroot_is_capable_nolog(const int cap)
56809+{
56810+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56811+ return gr_task_chroot_is_capable_nolog(current, cap);
56812+#endif
56813+ return 1;
56814+}
56815+
56816+int
56817+gr_handle_chroot_sysctl(const int op)
56818+{
56819+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56820+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56821+ proc_is_chrooted(current))
56822+ return -EACCES;
56823+#endif
56824+ return 0;
56825+}
56826+
56827+void
56828+gr_handle_chroot_chdir(struct path *path)
56829+{
56830+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56831+ if (grsec_enable_chroot_chdir)
56832+ set_fs_pwd(current->fs, path);
56833+#endif
56834+ return;
56835+}
56836+
56837+int
56838+gr_handle_chroot_chmod(const struct dentry *dentry,
56839+ const struct vfsmount *mnt, const int mode)
56840+{
56841+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56842+ /* allow chmod +s on directories, but not files */
56843+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56844+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56845+ proc_is_chrooted(current)) {
56846+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56847+ return -EPERM;
56848+ }
56849+#endif
56850+ return 0;
56851+}
56852diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56853new file mode 100644
56854index 0000000..213ad8b
56855--- /dev/null
56856+++ b/grsecurity/grsec_disabled.c
56857@@ -0,0 +1,437 @@
56858+#include <linux/kernel.h>
56859+#include <linux/module.h>
56860+#include <linux/sched.h>
56861+#include <linux/file.h>
56862+#include <linux/fs.h>
56863+#include <linux/kdev_t.h>
56864+#include <linux/net.h>
56865+#include <linux/in.h>
56866+#include <linux/ip.h>
56867+#include <linux/skbuff.h>
56868+#include <linux/sysctl.h>
56869+
56870+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56871+void
56872+pax_set_initial_flags(struct linux_binprm *bprm)
56873+{
56874+ return;
56875+}
56876+#endif
56877+
56878+#ifdef CONFIG_SYSCTL
56879+__u32
56880+gr_handle_sysctl(const struct ctl_table * table, const int op)
56881+{
56882+ return 0;
56883+}
56884+#endif
56885+
56886+#ifdef CONFIG_TASKSTATS
56887+int gr_is_taskstats_denied(int pid)
56888+{
56889+ return 0;
56890+}
56891+#endif
56892+
56893+int
56894+gr_acl_is_enabled(void)
56895+{
56896+ return 0;
56897+}
56898+
56899+void
56900+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56901+{
56902+ return;
56903+}
56904+
56905+int
56906+gr_handle_rawio(const struct inode *inode)
56907+{
56908+ return 0;
56909+}
56910+
56911+void
56912+gr_acl_handle_psacct(struct task_struct *task, const long code)
56913+{
56914+ return;
56915+}
56916+
56917+int
56918+gr_handle_ptrace(struct task_struct *task, const long request)
56919+{
56920+ return 0;
56921+}
56922+
56923+int
56924+gr_handle_proc_ptrace(struct task_struct *task)
56925+{
56926+ return 0;
56927+}
56928+
56929+void
56930+gr_learn_resource(const struct task_struct *task,
56931+ const int res, const unsigned long wanted, const int gt)
56932+{
56933+ return;
56934+}
56935+
56936+int
56937+gr_set_acls(const int type)
56938+{
56939+ return 0;
56940+}
56941+
56942+int
56943+gr_check_hidden_task(const struct task_struct *tsk)
56944+{
56945+ return 0;
56946+}
56947+
56948+int
56949+gr_check_protected_task(const struct task_struct *task)
56950+{
56951+ return 0;
56952+}
56953+
56954+int
56955+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56956+{
56957+ return 0;
56958+}
56959+
56960+void
56961+gr_copy_label(struct task_struct *tsk)
56962+{
56963+ return;
56964+}
56965+
56966+void
56967+gr_set_pax_flags(struct task_struct *task)
56968+{
56969+ return;
56970+}
56971+
56972+int
56973+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56974+ const int unsafe_share)
56975+{
56976+ return 0;
56977+}
56978+
56979+void
56980+gr_handle_delete(const ino_t ino, const dev_t dev)
56981+{
56982+ return;
56983+}
56984+
56985+void
56986+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56987+{
56988+ return;
56989+}
56990+
56991+void
56992+gr_handle_crash(struct task_struct *task, const int sig)
56993+{
56994+ return;
56995+}
56996+
56997+int
56998+gr_check_crash_exec(const struct file *filp)
56999+{
57000+ return 0;
57001+}
57002+
57003+int
57004+gr_check_crash_uid(const uid_t uid)
57005+{
57006+ return 0;
57007+}
57008+
57009+void
57010+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57011+ struct dentry *old_dentry,
57012+ struct dentry *new_dentry,
57013+ struct vfsmount *mnt, const __u8 replace)
57014+{
57015+ return;
57016+}
57017+
57018+int
57019+gr_search_socket(const int family, const int type, const int protocol)
57020+{
57021+ return 1;
57022+}
57023+
57024+int
57025+gr_search_connectbind(const int mode, const struct socket *sock,
57026+ const struct sockaddr_in *addr)
57027+{
57028+ return 0;
57029+}
57030+
57031+void
57032+gr_handle_alertkill(struct task_struct *task)
57033+{
57034+ return;
57035+}
57036+
57037+__u32
57038+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
57039+{
57040+ return 1;
57041+}
57042+
57043+__u32
57044+gr_acl_handle_hidden_file(const struct dentry * dentry,
57045+ const struct vfsmount * mnt)
57046+{
57047+ return 1;
57048+}
57049+
57050+__u32
57051+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
57052+ int acc_mode)
57053+{
57054+ return 1;
57055+}
57056+
57057+__u32
57058+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57059+{
57060+ return 1;
57061+}
57062+
57063+__u32
57064+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
57065+{
57066+ return 1;
57067+}
57068+
57069+int
57070+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
57071+ unsigned int *vm_flags)
57072+{
57073+ return 1;
57074+}
57075+
57076+__u32
57077+gr_acl_handle_truncate(const struct dentry * dentry,
57078+ const struct vfsmount * mnt)
57079+{
57080+ return 1;
57081+}
57082+
57083+__u32
57084+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
57085+{
57086+ return 1;
57087+}
57088+
57089+__u32
57090+gr_acl_handle_access(const struct dentry * dentry,
57091+ const struct vfsmount * mnt, const int fmode)
57092+{
57093+ return 1;
57094+}
57095+
57096+__u32
57097+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
57098+ umode_t *mode)
57099+{
57100+ return 1;
57101+}
57102+
57103+__u32
57104+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
57105+{
57106+ return 1;
57107+}
57108+
57109+__u32
57110+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57111+{
57112+ return 1;
57113+}
57114+
57115+void
57116+grsecurity_init(void)
57117+{
57118+ return;
57119+}
57120+
57121+umode_t gr_acl_umask(void)
57122+{
57123+ return 0;
57124+}
57125+
57126+__u32
57127+gr_acl_handle_mknod(const struct dentry * new_dentry,
57128+ const struct dentry * parent_dentry,
57129+ const struct vfsmount * parent_mnt,
57130+ const int mode)
57131+{
57132+ return 1;
57133+}
57134+
57135+__u32
57136+gr_acl_handle_mkdir(const struct dentry * new_dentry,
57137+ const struct dentry * parent_dentry,
57138+ const struct vfsmount * parent_mnt)
57139+{
57140+ return 1;
57141+}
57142+
57143+__u32
57144+gr_acl_handle_symlink(const struct dentry * new_dentry,
57145+ const struct dentry * parent_dentry,
57146+ const struct vfsmount * parent_mnt, const char *from)
57147+{
57148+ return 1;
57149+}
57150+
57151+__u32
57152+gr_acl_handle_link(const struct dentry * new_dentry,
57153+ const struct dentry * parent_dentry,
57154+ const struct vfsmount * parent_mnt,
57155+ const struct dentry * old_dentry,
57156+ const struct vfsmount * old_mnt, const char *to)
57157+{
57158+ return 1;
57159+}
57160+
57161+int
57162+gr_acl_handle_rename(const struct dentry *new_dentry,
57163+ const struct dentry *parent_dentry,
57164+ const struct vfsmount *parent_mnt,
57165+ const struct dentry *old_dentry,
57166+ const struct inode *old_parent_inode,
57167+ const struct vfsmount *old_mnt, const char *newname)
57168+{
57169+ return 0;
57170+}
57171+
57172+int
57173+gr_acl_handle_filldir(const struct file *file, const char *name,
57174+ const int namelen, const ino_t ino)
57175+{
57176+ return 1;
57177+}
57178+
57179+int
57180+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57181+ const time_t shm_createtime, const uid_t cuid, const int shmid)
57182+{
57183+ return 1;
57184+}
57185+
57186+int
57187+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
57188+{
57189+ return 0;
57190+}
57191+
57192+int
57193+gr_search_accept(const struct socket *sock)
57194+{
57195+ return 0;
57196+}
57197+
57198+int
57199+gr_search_listen(const struct socket *sock)
57200+{
57201+ return 0;
57202+}
57203+
57204+int
57205+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
57206+{
57207+ return 0;
57208+}
57209+
57210+__u32
57211+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
57212+{
57213+ return 1;
57214+}
57215+
57216+__u32
57217+gr_acl_handle_creat(const struct dentry * dentry,
57218+ const struct dentry * p_dentry,
57219+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57220+ const int imode)
57221+{
57222+ return 1;
57223+}
57224+
57225+void
57226+gr_acl_handle_exit(void)
57227+{
57228+ return;
57229+}
57230+
57231+int
57232+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57233+{
57234+ return 1;
57235+}
57236+
57237+void
57238+gr_set_role_label(const uid_t uid, const gid_t gid)
57239+{
57240+ return;
57241+}
57242+
57243+int
57244+gr_acl_handle_procpidmem(const struct task_struct *task)
57245+{
57246+ return 0;
57247+}
57248+
57249+int
57250+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
57251+{
57252+ return 0;
57253+}
57254+
57255+int
57256+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
57257+{
57258+ return 0;
57259+}
57260+
57261+void
57262+gr_set_kernel_label(struct task_struct *task)
57263+{
57264+ return;
57265+}
57266+
57267+int
57268+gr_check_user_change(int real, int effective, int fs)
57269+{
57270+ return 0;
57271+}
57272+
57273+int
57274+gr_check_group_change(int real, int effective, int fs)
57275+{
57276+ return 0;
57277+}
57278+
57279+int gr_acl_enable_at_secure(void)
57280+{
57281+ return 0;
57282+}
57283+
57284+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57285+{
57286+ return dentry->d_inode->i_sb->s_dev;
57287+}
57288+
57289+EXPORT_SYMBOL(gr_learn_resource);
57290+EXPORT_SYMBOL(gr_set_kernel_label);
57291+#ifdef CONFIG_SECURITY
57292+EXPORT_SYMBOL(gr_check_user_change);
57293+EXPORT_SYMBOL(gr_check_group_change);
57294+#endif
57295diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
57296new file mode 100644
57297index 0000000..abfa971
57298--- /dev/null
57299+++ b/grsecurity/grsec_exec.c
57300@@ -0,0 +1,174 @@
57301+#include <linux/kernel.h>
57302+#include <linux/sched.h>
57303+#include <linux/file.h>
57304+#include <linux/binfmts.h>
57305+#include <linux/fs.h>
57306+#include <linux/types.h>
57307+#include <linux/grdefs.h>
57308+#include <linux/grsecurity.h>
57309+#include <linux/grinternal.h>
57310+#include <linux/capability.h>
57311+#include <linux/module.h>
57312+
57313+#include <asm/uaccess.h>
57314+
57315+#ifdef CONFIG_GRKERNSEC_EXECLOG
57316+static char gr_exec_arg_buf[132];
57317+static DEFINE_MUTEX(gr_exec_arg_mutex);
57318+#endif
57319+
57320+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
57321+
57322+void
57323+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
57324+{
57325+#ifdef CONFIG_GRKERNSEC_EXECLOG
57326+ char *grarg = gr_exec_arg_buf;
57327+ unsigned int i, x, execlen = 0;
57328+ char c;
57329+
57330+ if (!((grsec_enable_execlog && grsec_enable_group &&
57331+ in_group_p(grsec_audit_gid))
57332+ || (grsec_enable_execlog && !grsec_enable_group)))
57333+ return;
57334+
57335+ mutex_lock(&gr_exec_arg_mutex);
57336+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
57337+
57338+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
57339+ const char __user *p;
57340+ unsigned int len;
57341+
57342+ p = get_user_arg_ptr(argv, i);
57343+ if (IS_ERR(p))
57344+ goto log;
57345+
57346+ len = strnlen_user(p, 128 - execlen);
57347+ if (len > 128 - execlen)
57348+ len = 128 - execlen;
57349+ else if (len > 0)
57350+ len--;
57351+ if (copy_from_user(grarg + execlen, p, len))
57352+ goto log;
57353+
57354+ /* rewrite unprintable characters */
57355+ for (x = 0; x < len; x++) {
57356+ c = *(grarg + execlen + x);
57357+ if (c < 32 || c > 126)
57358+ *(grarg + execlen + x) = ' ';
57359+ }
57360+
57361+ execlen += len;
57362+ *(grarg + execlen) = ' ';
57363+ *(grarg + execlen + 1) = '\0';
57364+ execlen++;
57365+ }
57366+
57367+ log:
57368+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57369+ bprm->file->f_path.mnt, grarg);
57370+ mutex_unlock(&gr_exec_arg_mutex);
57371+#endif
57372+ return;
57373+}
57374+
57375+#ifdef CONFIG_GRKERNSEC
57376+extern int gr_acl_is_capable(const int cap);
57377+extern int gr_acl_is_capable_nolog(const int cap);
57378+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57379+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
57380+extern int gr_chroot_is_capable(const int cap);
57381+extern int gr_chroot_is_capable_nolog(const int cap);
57382+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57383+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
57384+#endif
57385+
57386+const char *captab_log[] = {
57387+ "CAP_CHOWN",
57388+ "CAP_DAC_OVERRIDE",
57389+ "CAP_DAC_READ_SEARCH",
57390+ "CAP_FOWNER",
57391+ "CAP_FSETID",
57392+ "CAP_KILL",
57393+ "CAP_SETGID",
57394+ "CAP_SETUID",
57395+ "CAP_SETPCAP",
57396+ "CAP_LINUX_IMMUTABLE",
57397+ "CAP_NET_BIND_SERVICE",
57398+ "CAP_NET_BROADCAST",
57399+ "CAP_NET_ADMIN",
57400+ "CAP_NET_RAW",
57401+ "CAP_IPC_LOCK",
57402+ "CAP_IPC_OWNER",
57403+ "CAP_SYS_MODULE",
57404+ "CAP_SYS_RAWIO",
57405+ "CAP_SYS_CHROOT",
57406+ "CAP_SYS_PTRACE",
57407+ "CAP_SYS_PACCT",
57408+ "CAP_SYS_ADMIN",
57409+ "CAP_SYS_BOOT",
57410+ "CAP_SYS_NICE",
57411+ "CAP_SYS_RESOURCE",
57412+ "CAP_SYS_TIME",
57413+ "CAP_SYS_TTY_CONFIG",
57414+ "CAP_MKNOD",
57415+ "CAP_LEASE",
57416+ "CAP_AUDIT_WRITE",
57417+ "CAP_AUDIT_CONTROL",
57418+ "CAP_SETFCAP",
57419+ "CAP_MAC_OVERRIDE",
57420+ "CAP_MAC_ADMIN",
57421+ "CAP_SYSLOG",
57422+ "CAP_WAKE_ALARM"
57423+};
57424+
57425+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
57426+
57427+int gr_is_capable(const int cap)
57428+{
57429+#ifdef CONFIG_GRKERNSEC
57430+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
57431+ return 1;
57432+ return 0;
57433+#else
57434+ return 1;
57435+#endif
57436+}
57437+
57438+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57439+{
57440+#ifdef CONFIG_GRKERNSEC
57441+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
57442+ return 1;
57443+ return 0;
57444+#else
57445+ return 1;
57446+#endif
57447+}
57448+
57449+int gr_is_capable_nolog(const int cap)
57450+{
57451+#ifdef CONFIG_GRKERNSEC
57452+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
57453+ return 1;
57454+ return 0;
57455+#else
57456+ return 1;
57457+#endif
57458+}
57459+
57460+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
57461+{
57462+#ifdef CONFIG_GRKERNSEC
57463+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
57464+ return 1;
57465+ return 0;
57466+#else
57467+ return 1;
57468+#endif
57469+}
57470+
57471+EXPORT_SYMBOL(gr_is_capable);
57472+EXPORT_SYMBOL(gr_is_capable_nolog);
57473+EXPORT_SYMBOL(gr_task_is_capable);
57474+EXPORT_SYMBOL(gr_task_is_capable_nolog);
57475diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
57476new file mode 100644
57477index 0000000..d3ee748
57478--- /dev/null
57479+++ b/grsecurity/grsec_fifo.c
57480@@ -0,0 +1,24 @@
57481+#include <linux/kernel.h>
57482+#include <linux/sched.h>
57483+#include <linux/fs.h>
57484+#include <linux/file.h>
57485+#include <linux/grinternal.h>
57486+
57487+int
57488+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
57489+ const struct dentry *dir, const int flag, const int acc_mode)
57490+{
57491+#ifdef CONFIG_GRKERNSEC_FIFO
57492+ const struct cred *cred = current_cred();
57493+
57494+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
57495+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
57496+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
57497+ (cred->fsuid != dentry->d_inode->i_uid)) {
57498+ if (!inode_permission(dentry->d_inode, acc_mode))
57499+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
57500+ return -EACCES;
57501+ }
57502+#endif
57503+ return 0;
57504+}
57505diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
57506new file mode 100644
57507index 0000000..8ca18bf
57508--- /dev/null
57509+++ b/grsecurity/grsec_fork.c
57510@@ -0,0 +1,23 @@
57511+#include <linux/kernel.h>
57512+#include <linux/sched.h>
57513+#include <linux/grsecurity.h>
57514+#include <linux/grinternal.h>
57515+#include <linux/errno.h>
57516+
57517+void
57518+gr_log_forkfail(const int retval)
57519+{
57520+#ifdef CONFIG_GRKERNSEC_FORKFAIL
57521+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
57522+ switch (retval) {
57523+ case -EAGAIN:
57524+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
57525+ break;
57526+ case -ENOMEM:
57527+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
57528+ break;
57529+ }
57530+ }
57531+#endif
57532+ return;
57533+}
57534diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
57535new file mode 100644
57536index 0000000..01ddde4
57537--- /dev/null
57538+++ b/grsecurity/grsec_init.c
57539@@ -0,0 +1,277 @@
57540+#include <linux/kernel.h>
57541+#include <linux/sched.h>
57542+#include <linux/mm.h>
57543+#include <linux/gracl.h>
57544+#include <linux/slab.h>
57545+#include <linux/vmalloc.h>
57546+#include <linux/percpu.h>
57547+#include <linux/module.h>
57548+
57549+int grsec_enable_ptrace_readexec;
57550+int grsec_enable_setxid;
57551+int grsec_enable_brute;
57552+int grsec_enable_link;
57553+int grsec_enable_dmesg;
57554+int grsec_enable_harden_ptrace;
57555+int grsec_enable_fifo;
57556+int grsec_enable_execlog;
57557+int grsec_enable_signal;
57558+int grsec_enable_forkfail;
57559+int grsec_enable_audit_ptrace;
57560+int grsec_enable_time;
57561+int grsec_enable_audit_textrel;
57562+int grsec_enable_group;
57563+int grsec_audit_gid;
57564+int grsec_enable_chdir;
57565+int grsec_enable_mount;
57566+int grsec_enable_rofs;
57567+int grsec_enable_chroot_findtask;
57568+int grsec_enable_chroot_mount;
57569+int grsec_enable_chroot_shmat;
57570+int grsec_enable_chroot_fchdir;
57571+int grsec_enable_chroot_double;
57572+int grsec_enable_chroot_pivot;
57573+int grsec_enable_chroot_chdir;
57574+int grsec_enable_chroot_chmod;
57575+int grsec_enable_chroot_mknod;
57576+int grsec_enable_chroot_nice;
57577+int grsec_enable_chroot_execlog;
57578+int grsec_enable_chroot_caps;
57579+int grsec_enable_chroot_sysctl;
57580+int grsec_enable_chroot_unix;
57581+int grsec_enable_tpe;
57582+int grsec_tpe_gid;
57583+int grsec_enable_blackhole;
57584+#ifdef CONFIG_IPV6_MODULE
57585+EXPORT_SYMBOL(grsec_enable_blackhole);
57586+#endif
57587+int grsec_lastack_retries;
57588+int grsec_enable_tpe_all;
57589+int grsec_enable_tpe_invert;
57590+int grsec_enable_socket_all;
57591+int grsec_socket_all_gid;
57592+int grsec_enable_socket_client;
57593+int grsec_socket_client_gid;
57594+int grsec_enable_socket_server;
57595+int grsec_socket_server_gid;
57596+int grsec_resource_logging;
57597+int grsec_disable_privio;
57598+int grsec_enable_log_rwxmaps;
57599+int grsec_lock;
57600+
57601+DEFINE_SPINLOCK(grsec_alert_lock);
57602+unsigned long grsec_alert_wtime = 0;
57603+unsigned long grsec_alert_fyet = 0;
57604+
57605+DEFINE_SPINLOCK(grsec_audit_lock);
57606+
57607+DEFINE_RWLOCK(grsec_exec_file_lock);
57608+
57609+char *gr_shared_page[4];
57610+
57611+char *gr_alert_log_fmt;
57612+char *gr_audit_log_fmt;
57613+char *gr_alert_log_buf;
57614+char *gr_audit_log_buf;
57615+
57616+extern struct gr_arg *gr_usermode;
57617+extern unsigned char *gr_system_salt;
57618+extern unsigned char *gr_system_sum;
57619+
57620+void __init
57621+grsecurity_init(void)
57622+{
57623+ int j;
57624+ /* create the per-cpu shared pages */
57625+
57626+#ifdef CONFIG_X86
57627+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
57628+#endif
57629+
57630+ for (j = 0; j < 4; j++) {
57631+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
57632+ if (gr_shared_page[j] == NULL) {
57633+ panic("Unable to allocate grsecurity shared page");
57634+ return;
57635+ }
57636+ }
57637+
57638+ /* allocate log buffers */
57639+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
57640+ if (!gr_alert_log_fmt) {
57641+ panic("Unable to allocate grsecurity alert log format buffer");
57642+ return;
57643+ }
57644+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
57645+ if (!gr_audit_log_fmt) {
57646+ panic("Unable to allocate grsecurity audit log format buffer");
57647+ return;
57648+ }
57649+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57650+ if (!gr_alert_log_buf) {
57651+ panic("Unable to allocate grsecurity alert log buffer");
57652+ return;
57653+ }
57654+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57655+ if (!gr_audit_log_buf) {
57656+ panic("Unable to allocate grsecurity audit log buffer");
57657+ return;
57658+ }
57659+
57660+ /* allocate memory for authentication structure */
57661+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57662+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57663+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57664+
57665+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57666+ panic("Unable to allocate grsecurity authentication structure");
57667+ return;
57668+ }
57669+
57670+
57671+#ifdef CONFIG_GRKERNSEC_IO
57672+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57673+ grsec_disable_privio = 1;
57674+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57675+ grsec_disable_privio = 1;
57676+#else
57677+ grsec_disable_privio = 0;
57678+#endif
57679+#endif
57680+
57681+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57682+ /* for backward compatibility, tpe_invert always defaults to on if
57683+ enabled in the kernel
57684+ */
57685+ grsec_enable_tpe_invert = 1;
57686+#endif
57687+
57688+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57689+#ifndef CONFIG_GRKERNSEC_SYSCTL
57690+ grsec_lock = 1;
57691+#endif
57692+
57693+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57694+ grsec_enable_audit_textrel = 1;
57695+#endif
57696+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57697+ grsec_enable_log_rwxmaps = 1;
57698+#endif
57699+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57700+ grsec_enable_group = 1;
57701+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57702+#endif
57703+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57704+ grsec_enable_ptrace_readexec = 1;
57705+#endif
57706+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57707+ grsec_enable_chdir = 1;
57708+#endif
57709+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57710+ grsec_enable_harden_ptrace = 1;
57711+#endif
57712+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57713+ grsec_enable_mount = 1;
57714+#endif
57715+#ifdef CONFIG_GRKERNSEC_LINK
57716+ grsec_enable_link = 1;
57717+#endif
57718+#ifdef CONFIG_GRKERNSEC_BRUTE
57719+ grsec_enable_brute = 1;
57720+#endif
57721+#ifdef CONFIG_GRKERNSEC_DMESG
57722+ grsec_enable_dmesg = 1;
57723+#endif
57724+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57725+ grsec_enable_blackhole = 1;
57726+ grsec_lastack_retries = 4;
57727+#endif
57728+#ifdef CONFIG_GRKERNSEC_FIFO
57729+ grsec_enable_fifo = 1;
57730+#endif
57731+#ifdef CONFIG_GRKERNSEC_EXECLOG
57732+ grsec_enable_execlog = 1;
57733+#endif
57734+#ifdef CONFIG_GRKERNSEC_SETXID
57735+ grsec_enable_setxid = 1;
57736+#endif
57737+#ifdef CONFIG_GRKERNSEC_SIGNAL
57738+ grsec_enable_signal = 1;
57739+#endif
57740+#ifdef CONFIG_GRKERNSEC_FORKFAIL
57741+ grsec_enable_forkfail = 1;
57742+#endif
57743+#ifdef CONFIG_GRKERNSEC_TIME
57744+ grsec_enable_time = 1;
57745+#endif
57746+#ifdef CONFIG_GRKERNSEC_RESLOG
57747+ grsec_resource_logging = 1;
57748+#endif
57749+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57750+ grsec_enable_chroot_findtask = 1;
57751+#endif
57752+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57753+ grsec_enable_chroot_unix = 1;
57754+#endif
57755+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57756+ grsec_enable_chroot_mount = 1;
57757+#endif
57758+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57759+ grsec_enable_chroot_fchdir = 1;
57760+#endif
57761+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57762+ grsec_enable_chroot_shmat = 1;
57763+#endif
57764+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57765+ grsec_enable_audit_ptrace = 1;
57766+#endif
57767+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57768+ grsec_enable_chroot_double = 1;
57769+#endif
57770+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57771+ grsec_enable_chroot_pivot = 1;
57772+#endif
57773+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57774+ grsec_enable_chroot_chdir = 1;
57775+#endif
57776+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57777+ grsec_enable_chroot_chmod = 1;
57778+#endif
57779+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57780+ grsec_enable_chroot_mknod = 1;
57781+#endif
57782+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57783+ grsec_enable_chroot_nice = 1;
57784+#endif
57785+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57786+ grsec_enable_chroot_execlog = 1;
57787+#endif
57788+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57789+ grsec_enable_chroot_caps = 1;
57790+#endif
57791+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57792+ grsec_enable_chroot_sysctl = 1;
57793+#endif
57794+#ifdef CONFIG_GRKERNSEC_TPE
57795+ grsec_enable_tpe = 1;
57796+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57797+#ifdef CONFIG_GRKERNSEC_TPE_ALL
57798+ grsec_enable_tpe_all = 1;
57799+#endif
57800+#endif
57801+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57802+ grsec_enable_socket_all = 1;
57803+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57804+#endif
57805+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57806+ grsec_enable_socket_client = 1;
57807+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57808+#endif
57809+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57810+ grsec_enable_socket_server = 1;
57811+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57812+#endif
57813+#endif
57814+
57815+ return;
57816+}
57817diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57818new file mode 100644
57819index 0000000..3efe141
57820--- /dev/null
57821+++ b/grsecurity/grsec_link.c
57822@@ -0,0 +1,43 @@
57823+#include <linux/kernel.h>
57824+#include <linux/sched.h>
57825+#include <linux/fs.h>
57826+#include <linux/file.h>
57827+#include <linux/grinternal.h>
57828+
57829+int
57830+gr_handle_follow_link(const struct inode *parent,
57831+ const struct inode *inode,
57832+ const struct dentry *dentry, const struct vfsmount *mnt)
57833+{
57834+#ifdef CONFIG_GRKERNSEC_LINK
57835+ const struct cred *cred = current_cred();
57836+
57837+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57838+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57839+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57840+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57841+ return -EACCES;
57842+ }
57843+#endif
57844+ return 0;
57845+}
57846+
57847+int
57848+gr_handle_hardlink(const struct dentry *dentry,
57849+ const struct vfsmount *mnt,
57850+ struct inode *inode, const int mode, const char *to)
57851+{
57852+#ifdef CONFIG_GRKERNSEC_LINK
57853+ const struct cred *cred = current_cred();
57854+
57855+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57856+ (!S_ISREG(mode) || (mode & S_ISUID) ||
57857+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57858+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57859+ !capable(CAP_FOWNER) && cred->uid) {
57860+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57861+ return -EPERM;
57862+ }
57863+#endif
57864+ return 0;
57865+}
57866diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57867new file mode 100644
57868index 0000000..a45d2e9
57869--- /dev/null
57870+++ b/grsecurity/grsec_log.c
57871@@ -0,0 +1,322 @@
57872+#include <linux/kernel.h>
57873+#include <linux/sched.h>
57874+#include <linux/file.h>
57875+#include <linux/tty.h>
57876+#include <linux/fs.h>
57877+#include <linux/grinternal.h>
57878+
57879+#ifdef CONFIG_TREE_PREEMPT_RCU
57880+#define DISABLE_PREEMPT() preempt_disable()
57881+#define ENABLE_PREEMPT() preempt_enable()
57882+#else
57883+#define DISABLE_PREEMPT()
57884+#define ENABLE_PREEMPT()
57885+#endif
57886+
57887+#define BEGIN_LOCKS(x) \
57888+ DISABLE_PREEMPT(); \
57889+ rcu_read_lock(); \
57890+ read_lock(&tasklist_lock); \
57891+ read_lock(&grsec_exec_file_lock); \
57892+ if (x != GR_DO_AUDIT) \
57893+ spin_lock(&grsec_alert_lock); \
57894+ else \
57895+ spin_lock(&grsec_audit_lock)
57896+
57897+#define END_LOCKS(x) \
57898+ if (x != GR_DO_AUDIT) \
57899+ spin_unlock(&grsec_alert_lock); \
57900+ else \
57901+ spin_unlock(&grsec_audit_lock); \
57902+ read_unlock(&grsec_exec_file_lock); \
57903+ read_unlock(&tasklist_lock); \
57904+ rcu_read_unlock(); \
57905+ ENABLE_PREEMPT(); \
57906+ if (x == GR_DONT_AUDIT) \
57907+ gr_handle_alertkill(current)
57908+
57909+enum {
57910+ FLOODING,
57911+ NO_FLOODING
57912+};
57913+
57914+extern char *gr_alert_log_fmt;
57915+extern char *gr_audit_log_fmt;
57916+extern char *gr_alert_log_buf;
57917+extern char *gr_audit_log_buf;
57918+
57919+static int gr_log_start(int audit)
57920+{
57921+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57922+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57923+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57924+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57925+ unsigned long curr_secs = get_seconds();
57926+
57927+ if (audit == GR_DO_AUDIT)
57928+ goto set_fmt;
57929+
57930+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57931+ grsec_alert_wtime = curr_secs;
57932+ grsec_alert_fyet = 0;
57933+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57934+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57935+ grsec_alert_fyet++;
57936+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57937+ grsec_alert_wtime = curr_secs;
57938+ grsec_alert_fyet++;
57939+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57940+ return FLOODING;
57941+ }
57942+ else return FLOODING;
57943+
57944+set_fmt:
57945+#endif
57946+ memset(buf, 0, PAGE_SIZE);
57947+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
57948+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57949+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57950+ } else if (current->signal->curr_ip) {
57951+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57952+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
57953+ } else if (gr_acl_is_enabled()) {
57954+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57955+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57956+ } else {
57957+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
57958+ strcpy(buf, fmt);
57959+ }
57960+
57961+ return NO_FLOODING;
57962+}
57963+
57964+static void gr_log_middle(int audit, const char *msg, va_list ap)
57965+ __attribute__ ((format (printf, 2, 0)));
57966+
57967+static void gr_log_middle(int audit, const char *msg, va_list ap)
57968+{
57969+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57970+ unsigned int len = strlen(buf);
57971+
57972+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57973+
57974+ return;
57975+}
57976+
57977+static void gr_log_middle_varargs(int audit, const char *msg, ...)
57978+ __attribute__ ((format (printf, 2, 3)));
57979+
57980+static void gr_log_middle_varargs(int audit, const char *msg, ...)
57981+{
57982+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57983+ unsigned int len = strlen(buf);
57984+ va_list ap;
57985+
57986+ va_start(ap, msg);
57987+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57988+ va_end(ap);
57989+
57990+ return;
57991+}
57992+
57993+static void gr_log_end(int audit, int append_default)
57994+{
57995+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57996+
57997+ if (append_default) {
57998+ unsigned int len = strlen(buf);
57999+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
58000+ }
58001+
58002+ printk("%s\n", buf);
58003+
58004+ return;
58005+}
58006+
58007+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
58008+{
58009+ int logtype;
58010+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
58011+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
58012+ void *voidptr = NULL;
58013+ int num1 = 0, num2 = 0;
58014+ unsigned long ulong1 = 0, ulong2 = 0;
58015+ struct dentry *dentry = NULL;
58016+ struct vfsmount *mnt = NULL;
58017+ struct file *file = NULL;
58018+ struct task_struct *task = NULL;
58019+ const struct cred *cred, *pcred;
58020+ va_list ap;
58021+
58022+ BEGIN_LOCKS(audit);
58023+ logtype = gr_log_start(audit);
58024+ if (logtype == FLOODING) {
58025+ END_LOCKS(audit);
58026+ return;
58027+ }
58028+ va_start(ap, argtypes);
58029+ switch (argtypes) {
58030+ case GR_TTYSNIFF:
58031+ task = va_arg(ap, struct task_struct *);
58032+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
58033+ break;
58034+ case GR_SYSCTL_HIDDEN:
58035+ str1 = va_arg(ap, char *);
58036+ gr_log_middle_varargs(audit, msg, result, str1);
58037+ break;
58038+ case GR_RBAC:
58039+ dentry = va_arg(ap, struct dentry *);
58040+ mnt = va_arg(ap, struct vfsmount *);
58041+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
58042+ break;
58043+ case GR_RBAC_STR:
58044+ dentry = va_arg(ap, struct dentry *);
58045+ mnt = va_arg(ap, struct vfsmount *);
58046+ str1 = va_arg(ap, char *);
58047+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
58048+ break;
58049+ case GR_STR_RBAC:
58050+ str1 = va_arg(ap, char *);
58051+ dentry = va_arg(ap, struct dentry *);
58052+ mnt = va_arg(ap, struct vfsmount *);
58053+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
58054+ break;
58055+ case GR_RBAC_MODE2:
58056+ dentry = va_arg(ap, struct dentry *);
58057+ mnt = va_arg(ap, struct vfsmount *);
58058+ str1 = va_arg(ap, char *);
58059+ str2 = va_arg(ap, char *);
58060+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
58061+ break;
58062+ case GR_RBAC_MODE3:
58063+ dentry = va_arg(ap, struct dentry *);
58064+ mnt = va_arg(ap, struct vfsmount *);
58065+ str1 = va_arg(ap, char *);
58066+ str2 = va_arg(ap, char *);
58067+ str3 = va_arg(ap, char *);
58068+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
58069+ break;
58070+ case GR_FILENAME:
58071+ dentry = va_arg(ap, struct dentry *);
58072+ mnt = va_arg(ap, struct vfsmount *);
58073+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
58074+ break;
58075+ case GR_STR_FILENAME:
58076+ str1 = va_arg(ap, char *);
58077+ dentry = va_arg(ap, struct dentry *);
58078+ mnt = va_arg(ap, struct vfsmount *);
58079+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
58080+ break;
58081+ case GR_FILENAME_STR:
58082+ dentry = va_arg(ap, struct dentry *);
58083+ mnt = va_arg(ap, struct vfsmount *);
58084+ str1 = va_arg(ap, char *);
58085+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58086+ break;
58087+ case GR_FILENAME_TWO_INT:
58088+ dentry = va_arg(ap, struct dentry *);
58089+ mnt = va_arg(ap, struct vfsmount *);
58090+ num1 = va_arg(ap, int);
58091+ num2 = va_arg(ap, int);
58092+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58093+ break;
58094+ case GR_FILENAME_TWO_INT_STR:
58095+ dentry = va_arg(ap, struct dentry *);
58096+ mnt = va_arg(ap, struct vfsmount *);
58097+ num1 = va_arg(ap, int);
58098+ num2 = va_arg(ap, int);
58099+ str1 = va_arg(ap, char *);
58100+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58101+ break;
58102+ case GR_TEXTREL:
58103+ file = va_arg(ap, struct file *);
58104+ ulong1 = va_arg(ap, unsigned long);
58105+ ulong2 = va_arg(ap, unsigned long);
58106+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58107+ break;
58108+ case GR_PTRACE:
58109+ task = va_arg(ap, struct task_struct *);
58110+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58111+ break;
58112+ case GR_RESOURCE:
58113+ task = va_arg(ap, struct task_struct *);
58114+ cred = __task_cred(task);
58115+ pcred = __task_cred(task->real_parent);
58116+ ulong1 = va_arg(ap, unsigned long);
58117+ str1 = va_arg(ap, char *);
58118+ ulong2 = va_arg(ap, unsigned long);
58119+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58120+ break;
58121+ case GR_CAP:
58122+ task = va_arg(ap, struct task_struct *);
58123+ cred = __task_cred(task);
58124+ pcred = __task_cred(task->real_parent);
58125+ str1 = va_arg(ap, char *);
58126+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58127+ break;
58128+ case GR_SIG:
58129+ str1 = va_arg(ap, char *);
58130+ voidptr = va_arg(ap, void *);
58131+ gr_log_middle_varargs(audit, msg, str1, voidptr);
58132+ break;
58133+ case GR_SIG2:
58134+ task = va_arg(ap, struct task_struct *);
58135+ cred = __task_cred(task);
58136+ pcred = __task_cred(task->real_parent);
58137+ num1 = va_arg(ap, int);
58138+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58139+ break;
58140+ case GR_CRASH1:
58141+ task = va_arg(ap, struct task_struct *);
58142+ cred = __task_cred(task);
58143+ pcred = __task_cred(task->real_parent);
58144+ ulong1 = va_arg(ap, unsigned long);
58145+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58146+ break;
58147+ case GR_CRASH2:
58148+ task = va_arg(ap, struct task_struct *);
58149+ cred = __task_cred(task);
58150+ pcred = __task_cred(task->real_parent);
58151+ ulong1 = va_arg(ap, unsigned long);
58152+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58153+ break;
58154+ case GR_RWXMAP:
58155+ file = va_arg(ap, struct file *);
58156+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58157+ break;
58158+ case GR_PSACCT:
58159+ {
58160+ unsigned int wday, cday;
58161+ __u8 whr, chr;
58162+ __u8 wmin, cmin;
58163+ __u8 wsec, csec;
58164+ char cur_tty[64] = { 0 };
58165+ char parent_tty[64] = { 0 };
58166+
58167+ task = va_arg(ap, struct task_struct *);
58168+ wday = va_arg(ap, unsigned int);
58169+ cday = va_arg(ap, unsigned int);
58170+ whr = va_arg(ap, int);
58171+ chr = va_arg(ap, int);
58172+ wmin = va_arg(ap, int);
58173+ cmin = va_arg(ap, int);
58174+ wsec = va_arg(ap, int);
58175+ csec = va_arg(ap, int);
58176+ ulong1 = va_arg(ap, unsigned long);
58177+ cred = __task_cred(task);
58178+ pcred = __task_cred(task->real_parent);
58179+
58180+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58181+ }
58182+ break;
58183+ default:
58184+ gr_log_middle(audit, msg, ap);
58185+ }
58186+ va_end(ap);
58187+ // these don't need DEFAULTSECARGS printed on the end
58188+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
58189+ gr_log_end(audit, 0);
58190+ else
58191+ gr_log_end(audit, 1);
58192+ END_LOCKS(audit);
58193+}
58194diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
58195new file mode 100644
58196index 0000000..f536303
58197--- /dev/null
58198+++ b/grsecurity/grsec_mem.c
58199@@ -0,0 +1,40 @@
58200+#include <linux/kernel.h>
58201+#include <linux/sched.h>
58202+#include <linux/mm.h>
58203+#include <linux/mman.h>
58204+#include <linux/grinternal.h>
58205+
58206+void
58207+gr_handle_ioperm(void)
58208+{
58209+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
58210+ return;
58211+}
58212+
58213+void
58214+gr_handle_iopl(void)
58215+{
58216+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
58217+ return;
58218+}
58219+
58220+void
58221+gr_handle_mem_readwrite(u64 from, u64 to)
58222+{
58223+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58224+ return;
58225+}
58226+
58227+void
58228+gr_handle_vm86(void)
58229+{
58230+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
58231+ return;
58232+}
58233+
58234+void
58235+gr_log_badprocpid(const char *entry)
58236+{
58237+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
58238+ return;
58239+}
58240diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
58241new file mode 100644
58242index 0000000..2131422
58243--- /dev/null
58244+++ b/grsecurity/grsec_mount.c
58245@@ -0,0 +1,62 @@
58246+#include <linux/kernel.h>
58247+#include <linux/sched.h>
58248+#include <linux/mount.h>
58249+#include <linux/grsecurity.h>
58250+#include <linux/grinternal.h>
58251+
58252+void
58253+gr_log_remount(const char *devname, const int retval)
58254+{
58255+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58256+ if (grsec_enable_mount && (retval >= 0))
58257+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
58258+#endif
58259+ return;
58260+}
58261+
58262+void
58263+gr_log_unmount(const char *devname, const int retval)
58264+{
58265+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58266+ if (grsec_enable_mount && (retval >= 0))
58267+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
58268+#endif
58269+ return;
58270+}
58271+
58272+void
58273+gr_log_mount(const char *from, const char *to, const int retval)
58274+{
58275+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58276+ if (grsec_enable_mount && (retval >= 0))
58277+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
58278+#endif
58279+ return;
58280+}
58281+
58282+int
58283+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
58284+{
58285+#ifdef CONFIG_GRKERNSEC_ROFS
58286+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
58287+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
58288+ return -EPERM;
58289+ } else
58290+ return 0;
58291+#endif
58292+ return 0;
58293+}
58294+
58295+int
58296+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
58297+{
58298+#ifdef CONFIG_GRKERNSEC_ROFS
58299+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
58300+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
58301+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
58302+ return -EPERM;
58303+ } else
58304+ return 0;
58305+#endif
58306+ return 0;
58307+}
58308diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
58309new file mode 100644
58310index 0000000..a3b12a0
58311--- /dev/null
58312+++ b/grsecurity/grsec_pax.c
58313@@ -0,0 +1,36 @@
58314+#include <linux/kernel.h>
58315+#include <linux/sched.h>
58316+#include <linux/mm.h>
58317+#include <linux/file.h>
58318+#include <linux/grinternal.h>
58319+#include <linux/grsecurity.h>
58320+
58321+void
58322+gr_log_textrel(struct vm_area_struct * vma)
58323+{
58324+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58325+ if (grsec_enable_audit_textrel)
58326+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
58327+#endif
58328+ return;
58329+}
58330+
58331+void
58332+gr_log_rwxmmap(struct file *file)
58333+{
58334+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58335+ if (grsec_enable_log_rwxmaps)
58336+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
58337+#endif
58338+ return;
58339+}
58340+
58341+void
58342+gr_log_rwxmprotect(struct file *file)
58343+{
58344+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58345+ if (grsec_enable_log_rwxmaps)
58346+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
58347+#endif
58348+ return;
58349+}
58350diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
58351new file mode 100644
58352index 0000000..f7f29aa
58353--- /dev/null
58354+++ b/grsecurity/grsec_ptrace.c
58355@@ -0,0 +1,30 @@
58356+#include <linux/kernel.h>
58357+#include <linux/sched.h>
58358+#include <linux/grinternal.h>
58359+#include <linux/security.h>
58360+
58361+void
58362+gr_audit_ptrace(struct task_struct *task)
58363+{
58364+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58365+ if (grsec_enable_audit_ptrace)
58366+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
58367+#endif
58368+ return;
58369+}
58370+
58371+int
58372+gr_ptrace_readexec(struct file *file, int unsafe_flags)
58373+{
58374+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58375+ const struct dentry *dentry = file->f_path.dentry;
58376+ const struct vfsmount *mnt = file->f_path.mnt;
58377+
58378+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
58379+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
58380+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
58381+ return -EACCES;
58382+ }
58383+#endif
58384+ return 0;
58385+}
58386diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
58387new file mode 100644
58388index 0000000..7a5b2de
58389--- /dev/null
58390+++ b/grsecurity/grsec_sig.c
58391@@ -0,0 +1,207 @@
58392+#include <linux/kernel.h>
58393+#include <linux/sched.h>
58394+#include <linux/delay.h>
58395+#include <linux/grsecurity.h>
58396+#include <linux/grinternal.h>
58397+#include <linux/hardirq.h>
58398+
58399+char *signames[] = {
58400+ [SIGSEGV] = "Segmentation fault",
58401+ [SIGILL] = "Illegal instruction",
58402+ [SIGABRT] = "Abort",
58403+ [SIGBUS] = "Invalid alignment/Bus error"
58404+};
58405+
58406+void
58407+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
58408+{
58409+#ifdef CONFIG_GRKERNSEC_SIGNAL
58410+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
58411+ (sig == SIGABRT) || (sig == SIGBUS))) {
58412+ if (t->pid == current->pid) {
58413+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
58414+ } else {
58415+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
58416+ }
58417+ }
58418+#endif
58419+ return;
58420+}
58421+
58422+int
58423+gr_handle_signal(const struct task_struct *p, const int sig)
58424+{
58425+#ifdef CONFIG_GRKERNSEC
58426+ /* ignore the 0 signal for protected task checks */
58427+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
58428+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
58429+ return -EPERM;
58430+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
58431+ return -EPERM;
58432+ }
58433+#endif
58434+ return 0;
58435+}
58436+
58437+#ifdef CONFIG_GRKERNSEC
58438+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
58439+
58440+int gr_fake_force_sig(int sig, struct task_struct *t)
58441+{
58442+ unsigned long int flags;
58443+ int ret, blocked, ignored;
58444+ struct k_sigaction *action;
58445+
58446+ spin_lock_irqsave(&t->sighand->siglock, flags);
58447+ action = &t->sighand->action[sig-1];
58448+ ignored = action->sa.sa_handler == SIG_IGN;
58449+ blocked = sigismember(&t->blocked, sig);
58450+ if (blocked || ignored) {
58451+ action->sa.sa_handler = SIG_DFL;
58452+ if (blocked) {
58453+ sigdelset(&t->blocked, sig);
58454+ recalc_sigpending_and_wake(t);
58455+ }
58456+ }
58457+ if (action->sa.sa_handler == SIG_DFL)
58458+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
58459+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
58460+
58461+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
58462+
58463+ return ret;
58464+}
58465+#endif
58466+
58467+#ifdef CONFIG_GRKERNSEC_BRUTE
58468+#define GR_USER_BAN_TIME (15 * 60)
58469+
58470+static int __get_dumpable(unsigned long mm_flags)
58471+{
58472+ int ret;
58473+
58474+ ret = mm_flags & MMF_DUMPABLE_MASK;
58475+ return (ret >= 2) ? 2 : ret;
58476+}
58477+#endif
58478+
58479+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
58480+{
58481+#ifdef CONFIG_GRKERNSEC_BRUTE
58482+ uid_t uid = 0;
58483+
58484+ if (!grsec_enable_brute)
58485+ return;
58486+
58487+ rcu_read_lock();
58488+ read_lock(&tasklist_lock);
58489+ read_lock(&grsec_exec_file_lock);
58490+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
58491+ p->real_parent->brute = 1;
58492+ else {
58493+ const struct cred *cred = __task_cred(p), *cred2;
58494+ struct task_struct *tsk, *tsk2;
58495+
58496+ if (!__get_dumpable(mm_flags) && cred->uid) {
58497+ struct user_struct *user;
58498+
58499+ uid = cred->uid;
58500+
58501+ /* this is put upon execution past expiration */
58502+ user = find_user(uid);
58503+ if (user == NULL)
58504+ goto unlock;
58505+ user->banned = 1;
58506+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
58507+ if (user->ban_expires == ~0UL)
58508+ user->ban_expires--;
58509+
58510+ do_each_thread(tsk2, tsk) {
58511+ cred2 = __task_cred(tsk);
58512+ if (tsk != p && cred2->uid == uid)
58513+ gr_fake_force_sig(SIGKILL, tsk);
58514+ } while_each_thread(tsk2, tsk);
58515+ }
58516+ }
58517+unlock:
58518+ read_unlock(&grsec_exec_file_lock);
58519+ read_unlock(&tasklist_lock);
58520+ rcu_read_unlock();
58521+
58522+ if (uid)
58523+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
58524+
58525+#endif
58526+ return;
58527+}
58528+
58529+void gr_handle_brute_check(void)
58530+{
58531+#ifdef CONFIG_GRKERNSEC_BRUTE
58532+ if (current->brute)
58533+ msleep(30 * 1000);
58534+#endif
58535+ return;
58536+}
58537+
58538+void gr_handle_kernel_exploit(void)
58539+{
58540+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
58541+ const struct cred *cred;
58542+ struct task_struct *tsk, *tsk2;
58543+ struct user_struct *user;
58544+ uid_t uid;
58545+
58546+ if (in_irq() || in_serving_softirq() || in_nmi())
58547+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
58548+
58549+ uid = current_uid();
58550+
58551+ if (uid == 0)
58552+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
58553+ else {
58554+ /* kill all the processes of this user, hold a reference
58555+ to their creds struct, and prevent them from creating
58556+ another process until system reset
58557+ */
58558+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
58559+ /* we intentionally leak this ref */
58560+ user = get_uid(current->cred->user);
58561+ if (user) {
58562+ user->banned = 1;
58563+ user->ban_expires = ~0UL;
58564+ }
58565+
58566+ read_lock(&tasklist_lock);
58567+ do_each_thread(tsk2, tsk) {
58568+ cred = __task_cred(tsk);
58569+ if (cred->uid == uid)
58570+ gr_fake_force_sig(SIGKILL, tsk);
58571+ } while_each_thread(tsk2, tsk);
58572+ read_unlock(&tasklist_lock);
58573+ }
58574+#endif
58575+}
58576+
58577+int __gr_process_user_ban(struct user_struct *user)
58578+{
58579+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58580+ if (unlikely(user->banned)) {
58581+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
58582+ user->banned = 0;
58583+ user->ban_expires = 0;
58584+ free_uid(user);
58585+ } else
58586+ return -EPERM;
58587+ }
58588+#endif
58589+ return 0;
58590+}
58591+
58592+int gr_process_user_ban(void)
58593+{
58594+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58595+ return __gr_process_user_ban(current->cred->user);
58596+#endif
58597+ return 0;
58598+}
58599diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
58600new file mode 100644
58601index 0000000..4030d57
58602--- /dev/null
58603+++ b/grsecurity/grsec_sock.c
58604@@ -0,0 +1,244 @@
58605+#include <linux/kernel.h>
58606+#include <linux/module.h>
58607+#include <linux/sched.h>
58608+#include <linux/file.h>
58609+#include <linux/net.h>
58610+#include <linux/in.h>
58611+#include <linux/ip.h>
58612+#include <net/sock.h>
58613+#include <net/inet_sock.h>
58614+#include <linux/grsecurity.h>
58615+#include <linux/grinternal.h>
58616+#include <linux/gracl.h>
58617+
58618+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
58619+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
58620+
58621+EXPORT_SYMBOL(gr_search_udp_recvmsg);
58622+EXPORT_SYMBOL(gr_search_udp_sendmsg);
58623+
58624+#ifdef CONFIG_UNIX_MODULE
58625+EXPORT_SYMBOL(gr_acl_handle_unix);
58626+EXPORT_SYMBOL(gr_acl_handle_mknod);
58627+EXPORT_SYMBOL(gr_handle_chroot_unix);
58628+EXPORT_SYMBOL(gr_handle_create);
58629+#endif
58630+
58631+#ifdef CONFIG_GRKERNSEC
58632+#define gr_conn_table_size 32749
58633+struct conn_table_entry {
58634+ struct conn_table_entry *next;
58635+ struct signal_struct *sig;
58636+};
58637+
58638+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
58639+DEFINE_SPINLOCK(gr_conn_table_lock);
58640+
58641+extern const char * gr_socktype_to_name(unsigned char type);
58642+extern const char * gr_proto_to_name(unsigned char proto);
58643+extern const char * gr_sockfamily_to_name(unsigned char family);
58644+
58645+static __inline__ int
58646+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58647+{
58648+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58649+}
58650+
58651+static __inline__ int
58652+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58653+ __u16 sport, __u16 dport)
58654+{
58655+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58656+ sig->gr_sport == sport && sig->gr_dport == dport))
58657+ return 1;
58658+ else
58659+ return 0;
58660+}
58661+
58662+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58663+{
58664+ struct conn_table_entry **match;
58665+ unsigned int index;
58666+
58667+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58668+ sig->gr_sport, sig->gr_dport,
58669+ gr_conn_table_size);
58670+
58671+ newent->sig = sig;
58672+
58673+ match = &gr_conn_table[index];
58674+ newent->next = *match;
58675+ *match = newent;
58676+
58677+ return;
58678+}
58679+
58680+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58681+{
58682+ struct conn_table_entry *match, *last = NULL;
58683+ unsigned int index;
58684+
58685+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58686+ sig->gr_sport, sig->gr_dport,
58687+ gr_conn_table_size);
58688+
58689+ match = gr_conn_table[index];
58690+ while (match && !conn_match(match->sig,
58691+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58692+ sig->gr_dport)) {
58693+ last = match;
58694+ match = match->next;
58695+ }
58696+
58697+ if (match) {
58698+ if (last)
58699+ last->next = match->next;
58700+ else
58701+ gr_conn_table[index] = NULL;
58702+ kfree(match);
58703+ }
58704+
58705+ return;
58706+}
58707+
58708+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58709+ __u16 sport, __u16 dport)
58710+{
58711+ struct conn_table_entry *match;
58712+ unsigned int index;
58713+
58714+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58715+
58716+ match = gr_conn_table[index];
58717+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58718+ match = match->next;
58719+
58720+ if (match)
58721+ return match->sig;
58722+ else
58723+ return NULL;
58724+}
58725+
58726+#endif
58727+
58728+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58729+{
58730+#ifdef CONFIG_GRKERNSEC
58731+ struct signal_struct *sig = task->signal;
58732+ struct conn_table_entry *newent;
58733+
58734+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58735+ if (newent == NULL)
58736+ return;
58737+ /* no bh lock needed since we are called with bh disabled */
58738+ spin_lock(&gr_conn_table_lock);
58739+ gr_del_task_from_ip_table_nolock(sig);
58740+ sig->gr_saddr = inet->inet_rcv_saddr;
58741+ sig->gr_daddr = inet->inet_daddr;
58742+ sig->gr_sport = inet->inet_sport;
58743+ sig->gr_dport = inet->inet_dport;
58744+ gr_add_to_task_ip_table_nolock(sig, newent);
58745+ spin_unlock(&gr_conn_table_lock);
58746+#endif
58747+ return;
58748+}
58749+
58750+void gr_del_task_from_ip_table(struct task_struct *task)
58751+{
58752+#ifdef CONFIG_GRKERNSEC
58753+ spin_lock_bh(&gr_conn_table_lock);
58754+ gr_del_task_from_ip_table_nolock(task->signal);
58755+ spin_unlock_bh(&gr_conn_table_lock);
58756+#endif
58757+ return;
58758+}
58759+
58760+void
58761+gr_attach_curr_ip(const struct sock *sk)
58762+{
58763+#ifdef CONFIG_GRKERNSEC
58764+ struct signal_struct *p, *set;
58765+ const struct inet_sock *inet = inet_sk(sk);
58766+
58767+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58768+ return;
58769+
58770+ set = current->signal;
58771+
58772+ spin_lock_bh(&gr_conn_table_lock);
58773+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58774+ inet->inet_dport, inet->inet_sport);
58775+ if (unlikely(p != NULL)) {
58776+ set->curr_ip = p->curr_ip;
58777+ set->used_accept = 1;
58778+ gr_del_task_from_ip_table_nolock(p);
58779+ spin_unlock_bh(&gr_conn_table_lock);
58780+ return;
58781+ }
58782+ spin_unlock_bh(&gr_conn_table_lock);
58783+
58784+ set->curr_ip = inet->inet_daddr;
58785+ set->used_accept = 1;
58786+#endif
58787+ return;
58788+}
58789+
58790+int
58791+gr_handle_sock_all(const int family, const int type, const int protocol)
58792+{
58793+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58794+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58795+ (family != AF_UNIX)) {
58796+ if (family == AF_INET)
58797+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58798+ else
58799+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58800+ return -EACCES;
58801+ }
58802+#endif
58803+ return 0;
58804+}
58805+
58806+int
58807+gr_handle_sock_server(const struct sockaddr *sck)
58808+{
58809+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58810+ if (grsec_enable_socket_server &&
58811+ in_group_p(grsec_socket_server_gid) &&
58812+ sck && (sck->sa_family != AF_UNIX) &&
58813+ (sck->sa_family != AF_LOCAL)) {
58814+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58815+ return -EACCES;
58816+ }
58817+#endif
58818+ return 0;
58819+}
58820+
58821+int
58822+gr_handle_sock_server_other(const struct sock *sck)
58823+{
58824+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58825+ if (grsec_enable_socket_server &&
58826+ in_group_p(grsec_socket_server_gid) &&
58827+ sck && (sck->sk_family != AF_UNIX) &&
58828+ (sck->sk_family != AF_LOCAL)) {
58829+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58830+ return -EACCES;
58831+ }
58832+#endif
58833+ return 0;
58834+}
58835+
58836+int
58837+gr_handle_sock_client(const struct sockaddr *sck)
58838+{
58839+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58840+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58841+ sck && (sck->sa_family != AF_UNIX) &&
58842+ (sck->sa_family != AF_LOCAL)) {
58843+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58844+ return -EACCES;
58845+ }
58846+#endif
58847+ return 0;
58848+}
58849diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58850new file mode 100644
58851index 0000000..a1aedd7
58852--- /dev/null
58853+++ b/grsecurity/grsec_sysctl.c
58854@@ -0,0 +1,451 @@
58855+#include <linux/kernel.h>
58856+#include <linux/sched.h>
58857+#include <linux/sysctl.h>
58858+#include <linux/grsecurity.h>
58859+#include <linux/grinternal.h>
58860+
58861+int
58862+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58863+{
58864+#ifdef CONFIG_GRKERNSEC_SYSCTL
58865+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58866+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58867+ return -EACCES;
58868+ }
58869+#endif
58870+ return 0;
58871+}
58872+
58873+#ifdef CONFIG_GRKERNSEC_ROFS
58874+static int __maybe_unused one = 1;
58875+#endif
58876+
58877+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58878+struct ctl_table grsecurity_table[] = {
58879+#ifdef CONFIG_GRKERNSEC_SYSCTL
58880+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58881+#ifdef CONFIG_GRKERNSEC_IO
58882+ {
58883+ .procname = "disable_priv_io",
58884+ .data = &grsec_disable_privio,
58885+ .maxlen = sizeof(int),
58886+ .mode = 0600,
58887+ .proc_handler = &proc_dointvec,
58888+ },
58889+#endif
58890+#endif
58891+#ifdef CONFIG_GRKERNSEC_LINK
58892+ {
58893+ .procname = "linking_restrictions",
58894+ .data = &grsec_enable_link,
58895+ .maxlen = sizeof(int),
58896+ .mode = 0600,
58897+ .proc_handler = &proc_dointvec,
58898+ },
58899+#endif
58900+#ifdef CONFIG_GRKERNSEC_BRUTE
58901+ {
58902+ .procname = "deter_bruteforce",
58903+ .data = &grsec_enable_brute,
58904+ .maxlen = sizeof(int),
58905+ .mode = 0600,
58906+ .proc_handler = &proc_dointvec,
58907+ },
58908+#endif
58909+#ifdef CONFIG_GRKERNSEC_FIFO
58910+ {
58911+ .procname = "fifo_restrictions",
58912+ .data = &grsec_enable_fifo,
58913+ .maxlen = sizeof(int),
58914+ .mode = 0600,
58915+ .proc_handler = &proc_dointvec,
58916+ },
58917+#endif
58918+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58919+ {
58920+ .procname = "ptrace_readexec",
58921+ .data = &grsec_enable_ptrace_readexec,
58922+ .maxlen = sizeof(int),
58923+ .mode = 0600,
58924+ .proc_handler = &proc_dointvec,
58925+ },
58926+#endif
58927+#ifdef CONFIG_GRKERNSEC_SETXID
58928+ {
58929+ .procname = "consistent_setxid",
58930+ .data = &grsec_enable_setxid,
58931+ .maxlen = sizeof(int),
58932+ .mode = 0600,
58933+ .proc_handler = &proc_dointvec,
58934+ },
58935+#endif
58936+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58937+ {
58938+ .procname = "ip_blackhole",
58939+ .data = &grsec_enable_blackhole,
58940+ .maxlen = sizeof(int),
58941+ .mode = 0600,
58942+ .proc_handler = &proc_dointvec,
58943+ },
58944+ {
58945+ .procname = "lastack_retries",
58946+ .data = &grsec_lastack_retries,
58947+ .maxlen = sizeof(int),
58948+ .mode = 0600,
58949+ .proc_handler = &proc_dointvec,
58950+ },
58951+#endif
58952+#ifdef CONFIG_GRKERNSEC_EXECLOG
58953+ {
58954+ .procname = "exec_logging",
58955+ .data = &grsec_enable_execlog,
58956+ .maxlen = sizeof(int),
58957+ .mode = 0600,
58958+ .proc_handler = &proc_dointvec,
58959+ },
58960+#endif
58961+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58962+ {
58963+ .procname = "rwxmap_logging",
58964+ .data = &grsec_enable_log_rwxmaps,
58965+ .maxlen = sizeof(int),
58966+ .mode = 0600,
58967+ .proc_handler = &proc_dointvec,
58968+ },
58969+#endif
58970+#ifdef CONFIG_GRKERNSEC_SIGNAL
58971+ {
58972+ .procname = "signal_logging",
58973+ .data = &grsec_enable_signal,
58974+ .maxlen = sizeof(int),
58975+ .mode = 0600,
58976+ .proc_handler = &proc_dointvec,
58977+ },
58978+#endif
58979+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58980+ {
58981+ .procname = "forkfail_logging",
58982+ .data = &grsec_enable_forkfail,
58983+ .maxlen = sizeof(int),
58984+ .mode = 0600,
58985+ .proc_handler = &proc_dointvec,
58986+ },
58987+#endif
58988+#ifdef CONFIG_GRKERNSEC_TIME
58989+ {
58990+ .procname = "timechange_logging",
58991+ .data = &grsec_enable_time,
58992+ .maxlen = sizeof(int),
58993+ .mode = 0600,
58994+ .proc_handler = &proc_dointvec,
58995+ },
58996+#endif
58997+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58998+ {
58999+ .procname = "chroot_deny_shmat",
59000+ .data = &grsec_enable_chroot_shmat,
59001+ .maxlen = sizeof(int),
59002+ .mode = 0600,
59003+ .proc_handler = &proc_dointvec,
59004+ },
59005+#endif
59006+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
59007+ {
59008+ .procname = "chroot_deny_unix",
59009+ .data = &grsec_enable_chroot_unix,
59010+ .maxlen = sizeof(int),
59011+ .mode = 0600,
59012+ .proc_handler = &proc_dointvec,
59013+ },
59014+#endif
59015+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
59016+ {
59017+ .procname = "chroot_deny_mount",
59018+ .data = &grsec_enable_chroot_mount,
59019+ .maxlen = sizeof(int),
59020+ .mode = 0600,
59021+ .proc_handler = &proc_dointvec,
59022+ },
59023+#endif
59024+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59025+ {
59026+ .procname = "chroot_deny_fchdir",
59027+ .data = &grsec_enable_chroot_fchdir,
59028+ .maxlen = sizeof(int),
59029+ .mode = 0600,
59030+ .proc_handler = &proc_dointvec,
59031+ },
59032+#endif
59033+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59034+ {
59035+ .procname = "chroot_deny_chroot",
59036+ .data = &grsec_enable_chroot_double,
59037+ .maxlen = sizeof(int),
59038+ .mode = 0600,
59039+ .proc_handler = &proc_dointvec,
59040+ },
59041+#endif
59042+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59043+ {
59044+ .procname = "chroot_deny_pivot",
59045+ .data = &grsec_enable_chroot_pivot,
59046+ .maxlen = sizeof(int),
59047+ .mode = 0600,
59048+ .proc_handler = &proc_dointvec,
59049+ },
59050+#endif
59051+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59052+ {
59053+ .procname = "chroot_enforce_chdir",
59054+ .data = &grsec_enable_chroot_chdir,
59055+ .maxlen = sizeof(int),
59056+ .mode = 0600,
59057+ .proc_handler = &proc_dointvec,
59058+ },
59059+#endif
59060+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59061+ {
59062+ .procname = "chroot_deny_chmod",
59063+ .data = &grsec_enable_chroot_chmod,
59064+ .maxlen = sizeof(int),
59065+ .mode = 0600,
59066+ .proc_handler = &proc_dointvec,
59067+ },
59068+#endif
59069+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59070+ {
59071+ .procname = "chroot_deny_mknod",
59072+ .data = &grsec_enable_chroot_mknod,
59073+ .maxlen = sizeof(int),
59074+ .mode = 0600,
59075+ .proc_handler = &proc_dointvec,
59076+ },
59077+#endif
59078+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59079+ {
59080+ .procname = "chroot_restrict_nice",
59081+ .data = &grsec_enable_chroot_nice,
59082+ .maxlen = sizeof(int),
59083+ .mode = 0600,
59084+ .proc_handler = &proc_dointvec,
59085+ },
59086+#endif
59087+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59088+ {
59089+ .procname = "chroot_execlog",
59090+ .data = &grsec_enable_chroot_execlog,
59091+ .maxlen = sizeof(int),
59092+ .mode = 0600,
59093+ .proc_handler = &proc_dointvec,
59094+ },
59095+#endif
59096+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59097+ {
59098+ .procname = "chroot_caps",
59099+ .data = &grsec_enable_chroot_caps,
59100+ .maxlen = sizeof(int),
59101+ .mode = 0600,
59102+ .proc_handler = &proc_dointvec,
59103+ },
59104+#endif
59105+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59106+ {
59107+ .procname = "chroot_deny_sysctl",
59108+ .data = &grsec_enable_chroot_sysctl,
59109+ .maxlen = sizeof(int),
59110+ .mode = 0600,
59111+ .proc_handler = &proc_dointvec,
59112+ },
59113+#endif
59114+#ifdef CONFIG_GRKERNSEC_TPE
59115+ {
59116+ .procname = "tpe",
59117+ .data = &grsec_enable_tpe,
59118+ .maxlen = sizeof(int),
59119+ .mode = 0600,
59120+ .proc_handler = &proc_dointvec,
59121+ },
59122+ {
59123+ .procname = "tpe_gid",
59124+ .data = &grsec_tpe_gid,
59125+ .maxlen = sizeof(int),
59126+ .mode = 0600,
59127+ .proc_handler = &proc_dointvec,
59128+ },
59129+#endif
59130+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59131+ {
59132+ .procname = "tpe_invert",
59133+ .data = &grsec_enable_tpe_invert,
59134+ .maxlen = sizeof(int),
59135+ .mode = 0600,
59136+ .proc_handler = &proc_dointvec,
59137+ },
59138+#endif
59139+#ifdef CONFIG_GRKERNSEC_TPE_ALL
59140+ {
59141+ .procname = "tpe_restrict_all",
59142+ .data = &grsec_enable_tpe_all,
59143+ .maxlen = sizeof(int),
59144+ .mode = 0600,
59145+ .proc_handler = &proc_dointvec,
59146+ },
59147+#endif
59148+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59149+ {
59150+ .procname = "socket_all",
59151+ .data = &grsec_enable_socket_all,
59152+ .maxlen = sizeof(int),
59153+ .mode = 0600,
59154+ .proc_handler = &proc_dointvec,
59155+ },
59156+ {
59157+ .procname = "socket_all_gid",
59158+ .data = &grsec_socket_all_gid,
59159+ .maxlen = sizeof(int),
59160+ .mode = 0600,
59161+ .proc_handler = &proc_dointvec,
59162+ },
59163+#endif
59164+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59165+ {
59166+ .procname = "socket_client",
59167+ .data = &grsec_enable_socket_client,
59168+ .maxlen = sizeof(int),
59169+ .mode = 0600,
59170+ .proc_handler = &proc_dointvec,
59171+ },
59172+ {
59173+ .procname = "socket_client_gid",
59174+ .data = &grsec_socket_client_gid,
59175+ .maxlen = sizeof(int),
59176+ .mode = 0600,
59177+ .proc_handler = &proc_dointvec,
59178+ },
59179+#endif
59180+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59181+ {
59182+ .procname = "socket_server",
59183+ .data = &grsec_enable_socket_server,
59184+ .maxlen = sizeof(int),
59185+ .mode = 0600,
59186+ .proc_handler = &proc_dointvec,
59187+ },
59188+ {
59189+ .procname = "socket_server_gid",
59190+ .data = &grsec_socket_server_gid,
59191+ .maxlen = sizeof(int),
59192+ .mode = 0600,
59193+ .proc_handler = &proc_dointvec,
59194+ },
59195+#endif
59196+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
59197+ {
59198+ .procname = "audit_group",
59199+ .data = &grsec_enable_group,
59200+ .maxlen = sizeof(int),
59201+ .mode = 0600,
59202+ .proc_handler = &proc_dointvec,
59203+ },
59204+ {
59205+ .procname = "audit_gid",
59206+ .data = &grsec_audit_gid,
59207+ .maxlen = sizeof(int),
59208+ .mode = 0600,
59209+ .proc_handler = &proc_dointvec,
59210+ },
59211+#endif
59212+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59213+ {
59214+ .procname = "audit_chdir",
59215+ .data = &grsec_enable_chdir,
59216+ .maxlen = sizeof(int),
59217+ .mode = 0600,
59218+ .proc_handler = &proc_dointvec,
59219+ },
59220+#endif
59221+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59222+ {
59223+ .procname = "audit_mount",
59224+ .data = &grsec_enable_mount,
59225+ .maxlen = sizeof(int),
59226+ .mode = 0600,
59227+ .proc_handler = &proc_dointvec,
59228+ },
59229+#endif
59230+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59231+ {
59232+ .procname = "audit_textrel",
59233+ .data = &grsec_enable_audit_textrel,
59234+ .maxlen = sizeof(int),
59235+ .mode = 0600,
59236+ .proc_handler = &proc_dointvec,
59237+ },
59238+#endif
59239+#ifdef CONFIG_GRKERNSEC_DMESG
59240+ {
59241+ .procname = "dmesg",
59242+ .data = &grsec_enable_dmesg,
59243+ .maxlen = sizeof(int),
59244+ .mode = 0600,
59245+ .proc_handler = &proc_dointvec,
59246+ },
59247+#endif
59248+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59249+ {
59250+ .procname = "chroot_findtask",
59251+ .data = &grsec_enable_chroot_findtask,
59252+ .maxlen = sizeof(int),
59253+ .mode = 0600,
59254+ .proc_handler = &proc_dointvec,
59255+ },
59256+#endif
59257+#ifdef CONFIG_GRKERNSEC_RESLOG
59258+ {
59259+ .procname = "resource_logging",
59260+ .data = &grsec_resource_logging,
59261+ .maxlen = sizeof(int),
59262+ .mode = 0600,
59263+ .proc_handler = &proc_dointvec,
59264+ },
59265+#endif
59266+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59267+ {
59268+ .procname = "audit_ptrace",
59269+ .data = &grsec_enable_audit_ptrace,
59270+ .maxlen = sizeof(int),
59271+ .mode = 0600,
59272+ .proc_handler = &proc_dointvec,
59273+ },
59274+#endif
59275+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59276+ {
59277+ .procname = "harden_ptrace",
59278+ .data = &grsec_enable_harden_ptrace,
59279+ .maxlen = sizeof(int),
59280+ .mode = 0600,
59281+ .proc_handler = &proc_dointvec,
59282+ },
59283+#endif
59284+ {
59285+ .procname = "grsec_lock",
59286+ .data = &grsec_lock,
59287+ .maxlen = sizeof(int),
59288+ .mode = 0600,
59289+ .proc_handler = &proc_dointvec,
59290+ },
59291+#endif
59292+#ifdef CONFIG_GRKERNSEC_ROFS
59293+ {
59294+ .procname = "romount_protect",
59295+ .data = &grsec_enable_rofs,
59296+ .maxlen = sizeof(int),
59297+ .mode = 0600,
59298+ .proc_handler = &proc_dointvec_minmax,
59299+ .extra1 = &one,
59300+ .extra2 = &one,
59301+ },
59302+#endif
59303+ { }
59304+};
59305+#endif
59306diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
59307new file mode 100644
59308index 0000000..0dc13c3
59309--- /dev/null
59310+++ b/grsecurity/grsec_time.c
59311@@ -0,0 +1,16 @@
59312+#include <linux/kernel.h>
59313+#include <linux/sched.h>
59314+#include <linux/grinternal.h>
59315+#include <linux/module.h>
59316+
59317+void
59318+gr_log_timechange(void)
59319+{
59320+#ifdef CONFIG_GRKERNSEC_TIME
59321+ if (grsec_enable_time)
59322+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
59323+#endif
59324+ return;
59325+}
59326+
59327+EXPORT_SYMBOL(gr_log_timechange);
59328diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
59329new file mode 100644
59330index 0000000..07e0dc0
59331--- /dev/null
59332+++ b/grsecurity/grsec_tpe.c
59333@@ -0,0 +1,73 @@
59334+#include <linux/kernel.h>
59335+#include <linux/sched.h>
59336+#include <linux/file.h>
59337+#include <linux/fs.h>
59338+#include <linux/grinternal.h>
59339+
59340+extern int gr_acl_tpe_check(void);
59341+
59342+int
59343+gr_tpe_allow(const struct file *file)
59344+{
59345+#ifdef CONFIG_GRKERNSEC
59346+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
59347+ const struct cred *cred = current_cred();
59348+ char *msg = NULL;
59349+ char *msg2 = NULL;
59350+
59351+ // never restrict root
59352+ if (!cred->uid)
59353+ return 1;
59354+
59355+ if (grsec_enable_tpe) {
59356+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59357+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
59358+ msg = "not being in trusted group";
59359+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
59360+ msg = "being in untrusted group";
59361+#else
59362+ if (in_group_p(grsec_tpe_gid))
59363+ msg = "being in untrusted group";
59364+#endif
59365+ }
59366+ if (!msg && gr_acl_tpe_check())
59367+ msg = "being in untrusted role";
59368+
59369+ // not in any affected group/role
59370+ if (!msg)
59371+ goto next_check;
59372+
59373+ if (inode->i_uid)
59374+ msg2 = "file in non-root-owned directory";
59375+ else if (inode->i_mode & S_IWOTH)
59376+ msg2 = "file in world-writable directory";
59377+ else if (inode->i_mode & S_IWGRP)
59378+ msg2 = "file in group-writable directory";
59379+
59380+ if (msg && msg2) {
59381+ char fullmsg[70] = {0};
59382+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
59383+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
59384+ return 0;
59385+ }
59386+ msg = NULL;
59387+next_check:
59388+#ifdef CONFIG_GRKERNSEC_TPE_ALL
59389+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
59390+ return 1;
59391+
59392+ if (inode->i_uid && (inode->i_uid != cred->uid))
59393+ msg = "directory not owned by user";
59394+ else if (inode->i_mode & S_IWOTH)
59395+ msg = "file in world-writable directory";
59396+ else if (inode->i_mode & S_IWGRP)
59397+ msg = "file in group-writable directory";
59398+
59399+ if (msg) {
59400+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
59401+ return 0;
59402+ }
59403+#endif
59404+#endif
59405+ return 1;
59406+}
59407diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
59408new file mode 100644
59409index 0000000..9f7b1ac
59410--- /dev/null
59411+++ b/grsecurity/grsum.c
59412@@ -0,0 +1,61 @@
59413+#include <linux/err.h>
59414+#include <linux/kernel.h>
59415+#include <linux/sched.h>
59416+#include <linux/mm.h>
59417+#include <linux/scatterlist.h>
59418+#include <linux/crypto.h>
59419+#include <linux/gracl.h>
59420+
59421+
59422+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
59423+#error "crypto and sha256 must be built into the kernel"
59424+#endif
59425+
59426+int
59427+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
59428+{
59429+ char *p;
59430+ struct crypto_hash *tfm;
59431+ struct hash_desc desc;
59432+ struct scatterlist sg;
59433+ unsigned char temp_sum[GR_SHA_LEN];
59434+ volatile int retval = 0;
59435+ volatile int dummy = 0;
59436+ unsigned int i;
59437+
59438+ sg_init_table(&sg, 1);
59439+
59440+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
59441+ if (IS_ERR(tfm)) {
59442+ /* should never happen, since sha256 should be built in */
59443+ return 1;
59444+ }
59445+
59446+ desc.tfm = tfm;
59447+ desc.flags = 0;
59448+
59449+ crypto_hash_init(&desc);
59450+
59451+ p = salt;
59452+ sg_set_buf(&sg, p, GR_SALT_LEN);
59453+ crypto_hash_update(&desc, &sg, sg.length);
59454+
59455+ p = entry->pw;
59456+ sg_set_buf(&sg, p, strlen(p));
59457+
59458+ crypto_hash_update(&desc, &sg, sg.length);
59459+
59460+ crypto_hash_final(&desc, temp_sum);
59461+
59462+ memset(entry->pw, 0, GR_PW_LEN);
59463+
59464+ for (i = 0; i < GR_SHA_LEN; i++)
59465+ if (sum[i] != temp_sum[i])
59466+ retval = 1;
59467+ else
59468+ dummy = 1; // waste a cycle
59469+
59470+ crypto_free_hash(tfm);
59471+
59472+ return retval;
59473+}
59474diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
59475index 6cd5b64..f620d2d 100644
59476--- a/include/acpi/acpi_bus.h
59477+++ b/include/acpi/acpi_bus.h
59478@@ -107,7 +107,7 @@ struct acpi_device_ops {
59479 acpi_op_bind bind;
59480 acpi_op_unbind unbind;
59481 acpi_op_notify notify;
59482-};
59483+} __no_const;
59484
59485 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
59486
59487diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
59488index b7babf0..71e4e74 100644
59489--- a/include/asm-generic/atomic-long.h
59490+++ b/include/asm-generic/atomic-long.h
59491@@ -22,6 +22,12 @@
59492
59493 typedef atomic64_t atomic_long_t;
59494
59495+#ifdef CONFIG_PAX_REFCOUNT
59496+typedef atomic64_unchecked_t atomic_long_unchecked_t;
59497+#else
59498+typedef atomic64_t atomic_long_unchecked_t;
59499+#endif
59500+
59501 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
59502
59503 static inline long atomic_long_read(atomic_long_t *l)
59504@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59505 return (long)atomic64_read(v);
59506 }
59507
59508+#ifdef CONFIG_PAX_REFCOUNT
59509+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59510+{
59511+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59512+
59513+ return (long)atomic64_read_unchecked(v);
59514+}
59515+#endif
59516+
59517 static inline void atomic_long_set(atomic_long_t *l, long i)
59518 {
59519 atomic64_t *v = (atomic64_t *)l;
59520@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59521 atomic64_set(v, i);
59522 }
59523
59524+#ifdef CONFIG_PAX_REFCOUNT
59525+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59526+{
59527+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59528+
59529+ atomic64_set_unchecked(v, i);
59530+}
59531+#endif
59532+
59533 static inline void atomic_long_inc(atomic_long_t *l)
59534 {
59535 atomic64_t *v = (atomic64_t *)l;
59536@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59537 atomic64_inc(v);
59538 }
59539
59540+#ifdef CONFIG_PAX_REFCOUNT
59541+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59542+{
59543+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59544+
59545+ atomic64_inc_unchecked(v);
59546+}
59547+#endif
59548+
59549 static inline void atomic_long_dec(atomic_long_t *l)
59550 {
59551 atomic64_t *v = (atomic64_t *)l;
59552@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59553 atomic64_dec(v);
59554 }
59555
59556+#ifdef CONFIG_PAX_REFCOUNT
59557+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59558+{
59559+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59560+
59561+ atomic64_dec_unchecked(v);
59562+}
59563+#endif
59564+
59565 static inline void atomic_long_add(long i, atomic_long_t *l)
59566 {
59567 atomic64_t *v = (atomic64_t *)l;
59568@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59569 atomic64_add(i, v);
59570 }
59571
59572+#ifdef CONFIG_PAX_REFCOUNT
59573+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59574+{
59575+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59576+
59577+ atomic64_add_unchecked(i, v);
59578+}
59579+#endif
59580+
59581 static inline void atomic_long_sub(long i, atomic_long_t *l)
59582 {
59583 atomic64_t *v = (atomic64_t *)l;
59584@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59585 atomic64_sub(i, v);
59586 }
59587
59588+#ifdef CONFIG_PAX_REFCOUNT
59589+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59590+{
59591+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59592+
59593+ atomic64_sub_unchecked(i, v);
59594+}
59595+#endif
59596+
59597 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59598 {
59599 atomic64_t *v = (atomic64_t *)l;
59600@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59601 return (long)atomic64_inc_return(v);
59602 }
59603
59604+#ifdef CONFIG_PAX_REFCOUNT
59605+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59606+{
59607+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59608+
59609+ return (long)atomic64_inc_return_unchecked(v);
59610+}
59611+#endif
59612+
59613 static inline long atomic_long_dec_return(atomic_long_t *l)
59614 {
59615 atomic64_t *v = (atomic64_t *)l;
59616@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59617
59618 typedef atomic_t atomic_long_t;
59619
59620+#ifdef CONFIG_PAX_REFCOUNT
59621+typedef atomic_unchecked_t atomic_long_unchecked_t;
59622+#else
59623+typedef atomic_t atomic_long_unchecked_t;
59624+#endif
59625+
59626 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
59627 static inline long atomic_long_read(atomic_long_t *l)
59628 {
59629@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59630 return (long)atomic_read(v);
59631 }
59632
59633+#ifdef CONFIG_PAX_REFCOUNT
59634+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59635+{
59636+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59637+
59638+ return (long)atomic_read_unchecked(v);
59639+}
59640+#endif
59641+
59642 static inline void atomic_long_set(atomic_long_t *l, long i)
59643 {
59644 atomic_t *v = (atomic_t *)l;
59645@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59646 atomic_set(v, i);
59647 }
59648
59649+#ifdef CONFIG_PAX_REFCOUNT
59650+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59651+{
59652+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59653+
59654+ atomic_set_unchecked(v, i);
59655+}
59656+#endif
59657+
59658 static inline void atomic_long_inc(atomic_long_t *l)
59659 {
59660 atomic_t *v = (atomic_t *)l;
59661@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59662 atomic_inc(v);
59663 }
59664
59665+#ifdef CONFIG_PAX_REFCOUNT
59666+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59667+{
59668+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59669+
59670+ atomic_inc_unchecked(v);
59671+}
59672+#endif
59673+
59674 static inline void atomic_long_dec(atomic_long_t *l)
59675 {
59676 atomic_t *v = (atomic_t *)l;
59677@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59678 atomic_dec(v);
59679 }
59680
59681+#ifdef CONFIG_PAX_REFCOUNT
59682+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59683+{
59684+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59685+
59686+ atomic_dec_unchecked(v);
59687+}
59688+#endif
59689+
59690 static inline void atomic_long_add(long i, atomic_long_t *l)
59691 {
59692 atomic_t *v = (atomic_t *)l;
59693@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59694 atomic_add(i, v);
59695 }
59696
59697+#ifdef CONFIG_PAX_REFCOUNT
59698+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59699+{
59700+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59701+
59702+ atomic_add_unchecked(i, v);
59703+}
59704+#endif
59705+
59706 static inline void atomic_long_sub(long i, atomic_long_t *l)
59707 {
59708 atomic_t *v = (atomic_t *)l;
59709@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59710 atomic_sub(i, v);
59711 }
59712
59713+#ifdef CONFIG_PAX_REFCOUNT
59714+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59715+{
59716+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59717+
59718+ atomic_sub_unchecked(i, v);
59719+}
59720+#endif
59721+
59722 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59723 {
59724 atomic_t *v = (atomic_t *)l;
59725@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59726 return (long)atomic_inc_return(v);
59727 }
59728
59729+#ifdef CONFIG_PAX_REFCOUNT
59730+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59731+{
59732+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59733+
59734+ return (long)atomic_inc_return_unchecked(v);
59735+}
59736+#endif
59737+
59738 static inline long atomic_long_dec_return(atomic_long_t *l)
59739 {
59740 atomic_t *v = (atomic_t *)l;
59741@@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59742
59743 #endif /* BITS_PER_LONG == 64 */
59744
59745+#ifdef CONFIG_PAX_REFCOUNT
59746+static inline void pax_refcount_needs_these_functions(void)
59747+{
59748+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
59749+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59750+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59751+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59752+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59753+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59754+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59755+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59756+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59757+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59758+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59759+
59760+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59761+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59762+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59763+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59764+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59765+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59766+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59767+}
59768+#else
59769+#define atomic_read_unchecked(v) atomic_read(v)
59770+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59771+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59772+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59773+#define atomic_inc_unchecked(v) atomic_inc(v)
59774+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59775+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59776+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59777+#define atomic_dec_unchecked(v) atomic_dec(v)
59778+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59779+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59780+
59781+#define atomic_long_read_unchecked(v) atomic_long_read(v)
59782+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59783+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59784+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59785+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59786+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59787+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59788+#endif
59789+
59790 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
59791diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59792index b18ce4f..2ee2843 100644
59793--- a/include/asm-generic/atomic64.h
59794+++ b/include/asm-generic/atomic64.h
59795@@ -16,6 +16,8 @@ typedef struct {
59796 long long counter;
59797 } atomic64_t;
59798
59799+typedef atomic64_t atomic64_unchecked_t;
59800+
59801 #define ATOMIC64_INIT(i) { (i) }
59802
59803 extern long long atomic64_read(const atomic64_t *v);
59804@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59805 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59806 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59807
59808+#define atomic64_read_unchecked(v) atomic64_read(v)
59809+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59810+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59811+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59812+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59813+#define atomic64_inc_unchecked(v) atomic64_inc(v)
59814+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59815+#define atomic64_dec_unchecked(v) atomic64_dec(v)
59816+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59817+
59818 #endif /* _ASM_GENERIC_ATOMIC64_H */
59819diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59820index 1bfcfe5..e04c5c9 100644
59821--- a/include/asm-generic/cache.h
59822+++ b/include/asm-generic/cache.h
59823@@ -6,7 +6,7 @@
59824 * cache lines need to provide their own cache.h.
59825 */
59826
59827-#define L1_CACHE_SHIFT 5
59828-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59829+#define L1_CACHE_SHIFT 5UL
59830+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59831
59832 #endif /* __ASM_GENERIC_CACHE_H */
59833diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
59834index 0d68a1e..b74a761 100644
59835--- a/include/asm-generic/emergency-restart.h
59836+++ b/include/asm-generic/emergency-restart.h
59837@@ -1,7 +1,7 @@
59838 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
59839 #define _ASM_GENERIC_EMERGENCY_RESTART_H
59840
59841-static inline void machine_emergency_restart(void)
59842+static inline __noreturn void machine_emergency_restart(void)
59843 {
59844 machine_restart(NULL);
59845 }
59846diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59847index 0232ccb..13d9165 100644
59848--- a/include/asm-generic/kmap_types.h
59849+++ b/include/asm-generic/kmap_types.h
59850@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59851 KMAP_D(17) KM_NMI,
59852 KMAP_D(18) KM_NMI_PTE,
59853 KMAP_D(19) KM_KDB,
59854+KMAP_D(20) KM_CLEARPAGE,
59855 /*
59856 * Remember to update debug_kmap_atomic() when adding new kmap types!
59857 */
59858-KMAP_D(20) KM_TYPE_NR
59859+KMAP_D(21) KM_TYPE_NR
59860 };
59861
59862 #undef KMAP_D
59863diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
59864index 9ceb03b..2efbcbd 100644
59865--- a/include/asm-generic/local.h
59866+++ b/include/asm-generic/local.h
59867@@ -39,6 +39,7 @@ typedef struct
59868 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
59869 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
59870 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
59871+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
59872
59873 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
59874 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
59875diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59876index 725612b..9cc513a 100644
59877--- a/include/asm-generic/pgtable-nopmd.h
59878+++ b/include/asm-generic/pgtable-nopmd.h
59879@@ -1,14 +1,19 @@
59880 #ifndef _PGTABLE_NOPMD_H
59881 #define _PGTABLE_NOPMD_H
59882
59883-#ifndef __ASSEMBLY__
59884-
59885 #include <asm-generic/pgtable-nopud.h>
59886
59887-struct mm_struct;
59888-
59889 #define __PAGETABLE_PMD_FOLDED
59890
59891+#define PMD_SHIFT PUD_SHIFT
59892+#define PTRS_PER_PMD 1
59893+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59894+#define PMD_MASK (~(PMD_SIZE-1))
59895+
59896+#ifndef __ASSEMBLY__
59897+
59898+struct mm_struct;
59899+
59900 /*
59901 * Having the pmd type consist of a pud gets the size right, and allows
59902 * us to conceptually access the pud entry that this pmd is folded into
59903@@ -16,11 +21,6 @@ struct mm_struct;
59904 */
59905 typedef struct { pud_t pud; } pmd_t;
59906
59907-#define PMD_SHIFT PUD_SHIFT
59908-#define PTRS_PER_PMD 1
59909-#define PMD_SIZE (1UL << PMD_SHIFT)
59910-#define PMD_MASK (~(PMD_SIZE-1))
59911-
59912 /*
59913 * The "pud_xxx()" functions here are trivial for a folded two-level
59914 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59915diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59916index 810431d..ccc3638 100644
59917--- a/include/asm-generic/pgtable-nopud.h
59918+++ b/include/asm-generic/pgtable-nopud.h
59919@@ -1,10 +1,15 @@
59920 #ifndef _PGTABLE_NOPUD_H
59921 #define _PGTABLE_NOPUD_H
59922
59923-#ifndef __ASSEMBLY__
59924-
59925 #define __PAGETABLE_PUD_FOLDED
59926
59927+#define PUD_SHIFT PGDIR_SHIFT
59928+#define PTRS_PER_PUD 1
59929+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59930+#define PUD_MASK (~(PUD_SIZE-1))
59931+
59932+#ifndef __ASSEMBLY__
59933+
59934 /*
59935 * Having the pud type consist of a pgd gets the size right, and allows
59936 * us to conceptually access the pgd entry that this pud is folded into
59937@@ -12,11 +17,6 @@
59938 */
59939 typedef struct { pgd_t pgd; } pud_t;
59940
59941-#define PUD_SHIFT PGDIR_SHIFT
59942-#define PTRS_PER_PUD 1
59943-#define PUD_SIZE (1UL << PUD_SHIFT)
59944-#define PUD_MASK (~(PUD_SIZE-1))
59945-
59946 /*
59947 * The "pgd_xxx()" functions here are trivial for a folded two-level
59948 * setup: the pud is never bad, and a pud always exists (as it's folded
59949diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59950index a03c098..9624b83 100644
59951--- a/include/asm-generic/pgtable.h
59952+++ b/include/asm-generic/pgtable.h
59953@@ -504,6 +504,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
59954
59955 #endif /* CONFIG_MMU */
59956
59957+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59958+static inline unsigned long pax_open_kernel(void) { return 0; }
59959+#endif
59960+
59961+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59962+static inline unsigned long pax_close_kernel(void) { return 0; }
59963+#endif
59964+
59965 #endif /* !__ASSEMBLY__ */
59966
59967 #endif /* _ASM_GENERIC_PGTABLE_H */
59968diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
59969index 9788568..510dece 100644
59970--- a/include/asm-generic/uaccess.h
59971+++ b/include/asm-generic/uaccess.h
59972@@ -76,6 +76,8 @@ extern unsigned long search_exception_table(unsigned long);
59973 */
59974 #ifndef __copy_from_user
59975 static inline __must_check long __copy_from_user(void *to,
59976+ const void __user * from, unsigned long n) __size_overflow(3);
59977+static inline __must_check long __copy_from_user(void *to,
59978 const void __user * from, unsigned long n)
59979 {
59980 if (__builtin_constant_p(n)) {
59981@@ -106,6 +108,8 @@ static inline __must_check long __copy_from_user(void *to,
59982
59983 #ifndef __copy_to_user
59984 static inline __must_check long __copy_to_user(void __user *to,
59985+ const void *from, unsigned long n) __size_overflow(3);
59986+static inline __must_check long __copy_to_user(void __user *to,
59987 const void *from, unsigned long n)
59988 {
59989 if (__builtin_constant_p(n)) {
59990@@ -224,6 +228,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
59991 -EFAULT; \
59992 })
59993
59994+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) __size_overflow(1);
59995 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
59996 {
59997 size = __copy_from_user(x, ptr, size);
59998@@ -240,6 +245,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
59999 #define __copy_to_user_inatomic __copy_to_user
60000 #endif
60001
60002+static inline long copy_from_user(void *to, const void __user * from, unsigned long n) __size_overflow(3);
60003 static inline long copy_from_user(void *to,
60004 const void __user * from, unsigned long n)
60005 {
60006@@ -250,6 +256,7 @@ static inline long copy_from_user(void *to,
60007 return n;
60008 }
60009
60010+static inline long copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
60011 static inline long copy_to_user(void __user *to,
60012 const void *from, unsigned long n)
60013 {
60014@@ -314,6 +321,8 @@ static inline long strlen_user(const char __user *src)
60015 */
60016 #ifndef __clear_user
60017 static inline __must_check unsigned long
60018+__clear_user(void __user *to, unsigned long n) __size_overflow(2);
60019+static inline __must_check unsigned long
60020 __clear_user(void __user *to, unsigned long n)
60021 {
60022 memset((void __force *)to, 0, n);
60023@@ -322,6 +331,8 @@ __clear_user(void __user *to, unsigned long n)
60024 #endif
60025
60026 static inline __must_check unsigned long
60027+clear_user(void __user *to, unsigned long n) __size_overflow(2);
60028+static inline __must_check unsigned long
60029 clear_user(void __user *to, unsigned long n)
60030 {
60031 might_sleep();
60032diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
60033index b5e2e4c..6a5373e 100644
60034--- a/include/asm-generic/vmlinux.lds.h
60035+++ b/include/asm-generic/vmlinux.lds.h
60036@@ -217,6 +217,7 @@
60037 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
60038 VMLINUX_SYMBOL(__start_rodata) = .; \
60039 *(.rodata) *(.rodata.*) \
60040+ *(.data..read_only) \
60041 *(__vermagic) /* Kernel version magic */ \
60042 . = ALIGN(8); \
60043 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
60044@@ -722,17 +723,18 @@
60045 * section in the linker script will go there too. @phdr should have
60046 * a leading colon.
60047 *
60048- * Note that this macros defines __per_cpu_load as an absolute symbol.
60049+ * Note that this macro defines per_cpu_load as an absolute symbol.
60050 * If there is no need to put the percpu section at a predetermined
60051 * address, use PERCPU_SECTION.
60052 */
60053 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
60054- VMLINUX_SYMBOL(__per_cpu_load) = .; \
60055- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
60056+ per_cpu_load = .; \
60057+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
60058 - LOAD_OFFSET) { \
60059+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
60060 PERCPU_INPUT(cacheline) \
60061 } phdr \
60062- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
60063+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
60064
60065 /**
60066 * PERCPU_SECTION - define output section for percpu area, simple version
60067diff --git a/include/drm/drmP.h b/include/drm/drmP.h
60068index 92f0981..d44a37c 100644
60069--- a/include/drm/drmP.h
60070+++ b/include/drm/drmP.h
60071@@ -72,6 +72,7 @@
60072 #include <linux/workqueue.h>
60073 #include <linux/poll.h>
60074 #include <asm/pgalloc.h>
60075+#include <asm/local.h>
60076 #include "drm.h"
60077
60078 #include <linux/idr.h>
60079@@ -1038,7 +1039,7 @@ struct drm_device {
60080
60081 /** \name Usage Counters */
60082 /*@{ */
60083- int open_count; /**< Outstanding files open */
60084+ local_t open_count; /**< Outstanding files open */
60085 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
60086 atomic_t vma_count; /**< Outstanding vma areas open */
60087 int buf_use; /**< Buffers in use -- cannot alloc */
60088@@ -1049,7 +1050,7 @@ struct drm_device {
60089 /*@{ */
60090 unsigned long counters;
60091 enum drm_stat_type types[15];
60092- atomic_t counts[15];
60093+ atomic_unchecked_t counts[15];
60094 /*@} */
60095
60096 struct list_head filelist;
60097diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
60098index 37515d1..34fa8b0 100644
60099--- a/include/drm/drm_crtc_helper.h
60100+++ b/include/drm/drm_crtc_helper.h
60101@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
60102
60103 /* disable crtc when not in use - more explicit than dpms off */
60104 void (*disable)(struct drm_crtc *crtc);
60105-};
60106+} __no_const;
60107
60108 struct drm_encoder_helper_funcs {
60109 void (*dpms)(struct drm_encoder *encoder, int mode);
60110@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
60111 struct drm_connector *connector);
60112 /* disable encoder when not in use - more explicit than dpms off */
60113 void (*disable)(struct drm_encoder *encoder);
60114-};
60115+} __no_const;
60116
60117 struct drm_connector_helper_funcs {
60118 int (*get_modes)(struct drm_connector *connector);
60119diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
60120index 26c1f78..6722682 100644
60121--- a/include/drm/ttm/ttm_memory.h
60122+++ b/include/drm/ttm/ttm_memory.h
60123@@ -47,7 +47,7 @@
60124
60125 struct ttm_mem_shrink {
60126 int (*do_shrink) (struct ttm_mem_shrink *);
60127-};
60128+} __no_const;
60129
60130 /**
60131 * struct ttm_mem_global - Global memory accounting structure.
60132diff --git a/include/linux/a.out.h b/include/linux/a.out.h
60133index e86dfca..40cc55f 100644
60134--- a/include/linux/a.out.h
60135+++ b/include/linux/a.out.h
60136@@ -39,6 +39,14 @@ enum machine_type {
60137 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
60138 };
60139
60140+/* Constants for the N_FLAGS field */
60141+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60142+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
60143+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
60144+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
60145+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60146+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60147+
60148 #if !defined (N_MAGIC)
60149 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
60150 #endif
60151diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
60152index f4ff882..84b53a6 100644
60153--- a/include/linux/atmdev.h
60154+++ b/include/linux/atmdev.h
60155@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
60156 #endif
60157
60158 struct k_atm_aal_stats {
60159-#define __HANDLE_ITEM(i) atomic_t i
60160+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60161 __AAL_STAT_ITEMS
60162 #undef __HANDLE_ITEM
60163 };
60164diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
60165index 0092102..8a801b4 100644
60166--- a/include/linux/binfmts.h
60167+++ b/include/linux/binfmts.h
60168@@ -89,6 +89,7 @@ struct linux_binfmt {
60169 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
60170 int (*load_shlib)(struct file *);
60171 int (*core_dump)(struct coredump_params *cprm);
60172+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
60173 unsigned long min_coredump; /* minimal dump size */
60174 };
60175
60176diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
60177index 606cf33..b72c577 100644
60178--- a/include/linux/blkdev.h
60179+++ b/include/linux/blkdev.h
60180@@ -1379,7 +1379,7 @@ struct block_device_operations {
60181 /* this callback is with swap_lock and sometimes page table lock held */
60182 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
60183 struct module *owner;
60184-};
60185+} __do_const;
60186
60187 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
60188 unsigned long);
60189diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
60190index 4d1a074..88f929a 100644
60191--- a/include/linux/blktrace_api.h
60192+++ b/include/linux/blktrace_api.h
60193@@ -162,7 +162,7 @@ struct blk_trace {
60194 struct dentry *dir;
60195 struct dentry *dropped_file;
60196 struct dentry *msg_file;
60197- atomic_t dropped;
60198+ atomic_unchecked_t dropped;
60199 };
60200
60201 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
60202diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
60203index 83195fb..0b0f77d 100644
60204--- a/include/linux/byteorder/little_endian.h
60205+++ b/include/linux/byteorder/little_endian.h
60206@@ -42,51 +42,51 @@
60207
60208 static inline __le64 __cpu_to_le64p(const __u64 *p)
60209 {
60210- return (__force __le64)*p;
60211+ return (__force const __le64)*p;
60212 }
60213 static inline __u64 __le64_to_cpup(const __le64 *p)
60214 {
60215- return (__force __u64)*p;
60216+ return (__force const __u64)*p;
60217 }
60218 static inline __le32 __cpu_to_le32p(const __u32 *p)
60219 {
60220- return (__force __le32)*p;
60221+ return (__force const __le32)*p;
60222 }
60223 static inline __u32 __le32_to_cpup(const __le32 *p)
60224 {
60225- return (__force __u32)*p;
60226+ return (__force const __u32)*p;
60227 }
60228 static inline __le16 __cpu_to_le16p(const __u16 *p)
60229 {
60230- return (__force __le16)*p;
60231+ return (__force const __le16)*p;
60232 }
60233 static inline __u16 __le16_to_cpup(const __le16 *p)
60234 {
60235- return (__force __u16)*p;
60236+ return (__force const __u16)*p;
60237 }
60238 static inline __be64 __cpu_to_be64p(const __u64 *p)
60239 {
60240- return (__force __be64)__swab64p(p);
60241+ return (__force const __be64)__swab64p(p);
60242 }
60243 static inline __u64 __be64_to_cpup(const __be64 *p)
60244 {
60245- return __swab64p((__u64 *)p);
60246+ return __swab64p((const __u64 *)p);
60247 }
60248 static inline __be32 __cpu_to_be32p(const __u32 *p)
60249 {
60250- return (__force __be32)__swab32p(p);
60251+ return (__force const __be32)__swab32p(p);
60252 }
60253 static inline __u32 __be32_to_cpup(const __be32 *p)
60254 {
60255- return __swab32p((__u32 *)p);
60256+ return __swab32p((const __u32 *)p);
60257 }
60258 static inline __be16 __cpu_to_be16p(const __u16 *p)
60259 {
60260- return (__force __be16)__swab16p(p);
60261+ return (__force const __be16)__swab16p(p);
60262 }
60263 static inline __u16 __be16_to_cpup(const __be16 *p)
60264 {
60265- return __swab16p((__u16 *)p);
60266+ return __swab16p((const __u16 *)p);
60267 }
60268 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
60269 #define __le64_to_cpus(x) do { (void)(x); } while (0)
60270diff --git a/include/linux/cache.h b/include/linux/cache.h
60271index 4c57065..4307975 100644
60272--- a/include/linux/cache.h
60273+++ b/include/linux/cache.h
60274@@ -16,6 +16,10 @@
60275 #define __read_mostly
60276 #endif
60277
60278+#ifndef __read_only
60279+#define __read_only __read_mostly
60280+#endif
60281+
60282 #ifndef ____cacheline_aligned
60283 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
60284 #endif
60285diff --git a/include/linux/capability.h b/include/linux/capability.h
60286index 12d52de..b5f7fa7 100644
60287--- a/include/linux/capability.h
60288+++ b/include/linux/capability.h
60289@@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
60290 extern bool capable(int cap);
60291 extern bool ns_capable(struct user_namespace *ns, int cap);
60292 extern bool nsown_capable(int cap);
60293+extern bool capable_nolog(int cap);
60294+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
60295
60296 /* audit system wants to get cap info from files as well */
60297 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
60298diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
60299index 04ffb2e..6799180 100644
60300--- a/include/linux/cleancache.h
60301+++ b/include/linux/cleancache.h
60302@@ -31,7 +31,7 @@ struct cleancache_ops {
60303 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
60304 void (*flush_inode)(int, struct cleancache_filekey);
60305 void (*flush_fs)(int);
60306-};
60307+} __no_const;
60308
60309 extern struct cleancache_ops
60310 cleancache_register_ops(struct cleancache_ops *ops);
60311diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
60312index 2f40791..567b215 100644
60313--- a/include/linux/compiler-gcc4.h
60314+++ b/include/linux/compiler-gcc4.h
60315@@ -32,6 +32,15 @@
60316 #define __linktime_error(message) __attribute__((__error__(message)))
60317
60318 #if __GNUC_MINOR__ >= 5
60319+
60320+#ifdef CONSTIFY_PLUGIN
60321+#define __no_const __attribute__((no_const))
60322+#define __do_const __attribute__((do_const))
60323+#endif
60324+
60325+#ifdef SIZE_OVERFLOW_PLUGIN
60326+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
60327+#endif
60328 /*
60329 * Mark a position in code as unreachable. This can be used to
60330 * suppress control flow warnings after asm blocks that transfer
60331@@ -47,6 +56,11 @@
60332 #define __noclone __attribute__((__noclone__))
60333
60334 #endif
60335+
60336+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
60337+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
60338+#define __bos0(ptr) __bos((ptr), 0)
60339+#define __bos1(ptr) __bos((ptr), 1)
60340 #endif
60341
60342 #if __GNUC_MINOR__ > 0
60343diff --git a/include/linux/compiler.h b/include/linux/compiler.h
60344index 4a24354..ecaff7a 100644
60345--- a/include/linux/compiler.h
60346+++ b/include/linux/compiler.h
60347@@ -5,31 +5,62 @@
60348
60349 #ifdef __CHECKER__
60350 # define __user __attribute__((noderef, address_space(1)))
60351+# define __force_user __force __user
60352 # define __kernel __attribute__((address_space(0)))
60353+# define __force_kernel __force __kernel
60354 # define __safe __attribute__((safe))
60355 # define __force __attribute__((force))
60356 # define __nocast __attribute__((nocast))
60357 # define __iomem __attribute__((noderef, address_space(2)))
60358+# define __force_iomem __force __iomem
60359 # define __acquires(x) __attribute__((context(x,0,1)))
60360 # define __releases(x) __attribute__((context(x,1,0)))
60361 # define __acquire(x) __context__(x,1)
60362 # define __release(x) __context__(x,-1)
60363 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
60364 # define __percpu __attribute__((noderef, address_space(3)))
60365+# define __force_percpu __force __percpu
60366 #ifdef CONFIG_SPARSE_RCU_POINTER
60367 # define __rcu __attribute__((noderef, address_space(4)))
60368+# define __force_rcu __force __rcu
60369 #else
60370 # define __rcu
60371+# define __force_rcu
60372 #endif
60373 extern void __chk_user_ptr(const volatile void __user *);
60374 extern void __chk_io_ptr(const volatile void __iomem *);
60375+#elif defined(CHECKER_PLUGIN)
60376+//# define __user
60377+//# define __force_user
60378+//# define __kernel
60379+//# define __force_kernel
60380+# define __safe
60381+# define __force
60382+# define __nocast
60383+# define __iomem
60384+# define __force_iomem
60385+# define __chk_user_ptr(x) (void)0
60386+# define __chk_io_ptr(x) (void)0
60387+# define __builtin_warning(x, y...) (1)
60388+# define __acquires(x)
60389+# define __releases(x)
60390+# define __acquire(x) (void)0
60391+# define __release(x) (void)0
60392+# define __cond_lock(x,c) (c)
60393+# define __percpu
60394+# define __force_percpu
60395+# define __rcu
60396+# define __force_rcu
60397 #else
60398 # define __user
60399+# define __force_user
60400 # define __kernel
60401+# define __force_kernel
60402 # define __safe
60403 # define __force
60404 # define __nocast
60405 # define __iomem
60406+# define __force_iomem
60407 # define __chk_user_ptr(x) (void)0
60408 # define __chk_io_ptr(x) (void)0
60409 # define __builtin_warning(x, y...) (1)
60410@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
60411 # define __release(x) (void)0
60412 # define __cond_lock(x,c) (c)
60413 # define __percpu
60414+# define __force_percpu
60415 # define __rcu
60416+# define __force_rcu
60417 #endif
60418
60419 #ifdef __KERNEL__
60420@@ -264,6 +297,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60421 # define __attribute_const__ /* unimplemented */
60422 #endif
60423
60424+#ifndef __no_const
60425+# define __no_const
60426+#endif
60427+
60428+#ifndef __do_const
60429+# define __do_const
60430+#endif
60431+
60432+#ifndef __size_overflow
60433+# define __size_overflow(...)
60434+#endif
60435 /*
60436 * Tell gcc if a function is cold. The compiler will assume any path
60437 * directly leading to the call is unlikely.
60438@@ -273,6 +317,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60439 #define __cold
60440 #endif
60441
60442+#ifndef __alloc_size
60443+#define __alloc_size(...)
60444+#endif
60445+
60446+#ifndef __bos
60447+#define __bos(ptr, arg)
60448+#endif
60449+
60450+#ifndef __bos0
60451+#define __bos0(ptr)
60452+#endif
60453+
60454+#ifndef __bos1
60455+#define __bos1(ptr)
60456+#endif
60457+
60458 /* Simple shorthand for a section definition */
60459 #ifndef __section
60460 # define __section(S) __attribute__ ((__section__(#S)))
60461@@ -308,6 +368,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60462 * use is to mediate communication between process-level code and irq/NMI
60463 * handlers, all running on the same CPU.
60464 */
60465-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
60466+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
60467+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
60468
60469 #endif /* __LINUX_COMPILER_H */
60470diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
60471index e9eaec5..bfeb9bb 100644
60472--- a/include/linux/cpuset.h
60473+++ b/include/linux/cpuset.h
60474@@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
60475 * nodemask.
60476 */
60477 smp_mb();
60478- --ACCESS_ONCE(current->mems_allowed_change_disable);
60479+ --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
60480 }
60481
60482 static inline void set_mems_allowed(nodemask_t nodemask)
60483diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
60484index b936763..48685ee 100644
60485--- a/include/linux/crash_dump.h
60486+++ b/include/linux/crash_dump.h
60487@@ -14,7 +14,7 @@ extern unsigned long long elfcorehdr_addr;
60488 extern unsigned long long elfcorehdr_size;
60489
60490 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
60491- unsigned long, int);
60492+ unsigned long, int) __size_overflow(3);
60493
60494 /* Architecture code defines this if there are other possible ELF
60495 * machine types, e.g. on bi-arch capable hardware. */
60496diff --git a/include/linux/cred.h b/include/linux/cred.h
60497index adadf71..6af5560 100644
60498--- a/include/linux/cred.h
60499+++ b/include/linux/cred.h
60500@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
60501 static inline void validate_process_creds(void)
60502 {
60503 }
60504+static inline void validate_task_creds(struct task_struct *task)
60505+{
60506+}
60507 #endif
60508
60509 /**
60510diff --git a/include/linux/crypto.h b/include/linux/crypto.h
60511index 8a94217..15d49e3 100644
60512--- a/include/linux/crypto.h
60513+++ b/include/linux/crypto.h
60514@@ -365,7 +365,7 @@ struct cipher_tfm {
60515 const u8 *key, unsigned int keylen);
60516 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60517 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60518-};
60519+} __no_const;
60520
60521 struct hash_tfm {
60522 int (*init)(struct hash_desc *desc);
60523@@ -386,13 +386,13 @@ struct compress_tfm {
60524 int (*cot_decompress)(struct crypto_tfm *tfm,
60525 const u8 *src, unsigned int slen,
60526 u8 *dst, unsigned int *dlen);
60527-};
60528+} __no_const;
60529
60530 struct rng_tfm {
60531 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
60532 unsigned int dlen);
60533 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
60534-};
60535+} __no_const;
60536
60537 #define crt_ablkcipher crt_u.ablkcipher
60538 #define crt_aead crt_u.aead
60539diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
60540index 7925bf0..d5143d2 100644
60541--- a/include/linux/decompress/mm.h
60542+++ b/include/linux/decompress/mm.h
60543@@ -77,7 +77,7 @@ static void free(void *where)
60544 * warnings when not needed (indeed large_malloc / large_free are not
60545 * needed by inflate */
60546
60547-#define malloc(a) kmalloc(a, GFP_KERNEL)
60548+#define malloc(a) kmalloc((a), GFP_KERNEL)
60549 #define free(a) kfree(a)
60550
60551 #define large_malloc(a) vmalloc(a)
60552diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
60553index e13117c..e9fc938 100644
60554--- a/include/linux/dma-mapping.h
60555+++ b/include/linux/dma-mapping.h
60556@@ -46,7 +46,7 @@ struct dma_map_ops {
60557 u64 (*get_required_mask)(struct device *dev);
60558 #endif
60559 int is_phys;
60560-};
60561+} __do_const;
60562
60563 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
60564
60565diff --git a/include/linux/efi.h b/include/linux/efi.h
60566index 37c3007..92ab679 100644
60567--- a/include/linux/efi.h
60568+++ b/include/linux/efi.h
60569@@ -580,7 +580,7 @@ struct efivar_operations {
60570 efi_get_variable_t *get_variable;
60571 efi_get_next_variable_t *get_next_variable;
60572 efi_set_variable_t *set_variable;
60573-};
60574+} __no_const;
60575
60576 struct efivars {
60577 /*
60578diff --git a/include/linux/elf.h b/include/linux/elf.h
60579index 999b4f5..57753b4 100644
60580--- a/include/linux/elf.h
60581+++ b/include/linux/elf.h
60582@@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
60583 #define PT_GNU_EH_FRAME 0x6474e550
60584
60585 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
60586+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
60587+
60588+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
60589+
60590+/* Constants for the e_flags field */
60591+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60592+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
60593+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
60594+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
60595+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60596+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60597
60598 /*
60599 * Extended Numbering
60600@@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
60601 #define DT_DEBUG 21
60602 #define DT_TEXTREL 22
60603 #define DT_JMPREL 23
60604+#define DT_FLAGS 30
60605+ #define DF_TEXTREL 0x00000004
60606 #define DT_ENCODING 32
60607 #define OLD_DT_LOOS 0x60000000
60608 #define DT_LOOS 0x6000000d
60609@@ -243,6 +256,19 @@ typedef struct elf64_hdr {
60610 #define PF_W 0x2
60611 #define PF_X 0x1
60612
60613+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
60614+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
60615+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
60616+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
60617+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
60618+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
60619+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
60620+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
60621+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
60622+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
60623+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
60624+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
60625+
60626 typedef struct elf32_phdr{
60627 Elf32_Word p_type;
60628 Elf32_Off p_offset;
60629@@ -335,6 +361,8 @@ typedef struct elf64_shdr {
60630 #define EI_OSABI 7
60631 #define EI_PAD 8
60632
60633+#define EI_PAX 14
60634+
60635 #define ELFMAG0 0x7f /* EI_MAG */
60636 #define ELFMAG1 'E'
60637 #define ELFMAG2 'L'
60638@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
60639 #define elf_note elf32_note
60640 #define elf_addr_t Elf32_Off
60641 #define Elf_Half Elf32_Half
60642+#define elf_dyn Elf32_Dyn
60643
60644 #else
60645
60646@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
60647 #define elf_note elf64_note
60648 #define elf_addr_t Elf64_Off
60649 #define Elf_Half Elf64_Half
60650+#define elf_dyn Elf64_Dyn
60651
60652 #endif
60653
60654diff --git a/include/linux/filter.h b/include/linux/filter.h
60655index 8eeb205..d59bfa2 100644
60656--- a/include/linux/filter.h
60657+++ b/include/linux/filter.h
60658@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
60659
60660 struct sk_buff;
60661 struct sock;
60662+struct bpf_jit_work;
60663
60664 struct sk_filter
60665 {
60666@@ -141,6 +142,9 @@ struct sk_filter
60667 unsigned int len; /* Number of filter blocks */
60668 unsigned int (*bpf_func)(const struct sk_buff *skb,
60669 const struct sock_filter *filter);
60670+#ifdef CONFIG_BPF_JIT
60671+ struct bpf_jit_work *work;
60672+#endif
60673 struct rcu_head rcu;
60674 struct sock_filter insns[0];
60675 };
60676diff --git a/include/linux/firewire.h b/include/linux/firewire.h
60677index 84ccf8e..2e9b14c 100644
60678--- a/include/linux/firewire.h
60679+++ b/include/linux/firewire.h
60680@@ -428,7 +428,7 @@ struct fw_iso_context {
60681 union {
60682 fw_iso_callback_t sc;
60683 fw_iso_mc_callback_t mc;
60684- } callback;
60685+ } __no_const callback;
60686 void *callback_data;
60687 };
60688
60689diff --git a/include/linux/fs.h b/include/linux/fs.h
60690index 69cd5bb..58425c2 100644
60691--- a/include/linux/fs.h
60692+++ b/include/linux/fs.h
60693@@ -1623,7 +1623,8 @@ struct file_operations {
60694 int (*setlease)(struct file *, long, struct file_lock **);
60695 long (*fallocate)(struct file *file, int mode, loff_t offset,
60696 loff_t len);
60697-};
60698+} __do_const;
60699+typedef struct file_operations __no_const file_operations_no_const;
60700
60701 struct inode_operations {
60702 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
60703diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
60704index 003dc0f..3c4ea97 100644
60705--- a/include/linux/fs_struct.h
60706+++ b/include/linux/fs_struct.h
60707@@ -6,7 +6,7 @@
60708 #include <linux/seqlock.h>
60709
60710 struct fs_struct {
60711- int users;
60712+ atomic_t users;
60713 spinlock_t lock;
60714 seqcount_t seq;
60715 int umask;
60716diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
60717index ce31408..b1ad003 100644
60718--- a/include/linux/fscache-cache.h
60719+++ b/include/linux/fscache-cache.h
60720@@ -102,7 +102,7 @@ struct fscache_operation {
60721 fscache_operation_release_t release;
60722 };
60723
60724-extern atomic_t fscache_op_debug_id;
60725+extern atomic_unchecked_t fscache_op_debug_id;
60726 extern void fscache_op_work_func(struct work_struct *work);
60727
60728 extern void fscache_enqueue_operation(struct fscache_operation *);
60729@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60730 {
60731 INIT_WORK(&op->work, fscache_op_work_func);
60732 atomic_set(&op->usage, 1);
60733- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60734+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60735 op->processor = processor;
60736 op->release = release;
60737 INIT_LIST_HEAD(&op->pend_link);
60738diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60739index 2a53f10..0187fdf 100644
60740--- a/include/linux/fsnotify.h
60741+++ b/include/linux/fsnotify.h
60742@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60743 */
60744 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60745 {
60746- return kstrdup(name, GFP_KERNEL);
60747+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60748 }
60749
60750 /*
60751diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
60752index 91d0e0a3..035666b 100644
60753--- a/include/linux/fsnotify_backend.h
60754+++ b/include/linux/fsnotify_backend.h
60755@@ -105,6 +105,7 @@ struct fsnotify_ops {
60756 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
60757 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
60758 };
60759+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
60760
60761 /*
60762 * A group is a "thing" that wants to receive notification about filesystem
60763diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60764index c3da42d..c70e0df 100644
60765--- a/include/linux/ftrace_event.h
60766+++ b/include/linux/ftrace_event.h
60767@@ -97,7 +97,7 @@ struct trace_event_functions {
60768 trace_print_func raw;
60769 trace_print_func hex;
60770 trace_print_func binary;
60771-};
60772+} __no_const;
60773
60774 struct trace_event {
60775 struct hlist_node node;
60776@@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60777 extern int trace_add_event_call(struct ftrace_event_call *call);
60778 extern void trace_remove_event_call(struct ftrace_event_call *call);
60779
60780-#define is_signed_type(type) (((type)(-1)) < 0)
60781+#define is_signed_type(type) (((type)(-1)) < (type)1)
60782
60783 int trace_set_clr_event(const char *system, const char *event, int set);
60784
60785diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60786index e61d319..0da8505 100644
60787--- a/include/linux/genhd.h
60788+++ b/include/linux/genhd.h
60789@@ -185,7 +185,7 @@ struct gendisk {
60790 struct kobject *slave_dir;
60791
60792 struct timer_rand_state *random;
60793- atomic_t sync_io; /* RAID */
60794+ atomic_unchecked_t sync_io; /* RAID */
60795 struct disk_events *ev;
60796 #ifdef CONFIG_BLK_DEV_INTEGRITY
60797 struct blk_integrity *integrity;
60798diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60799new file mode 100644
60800index 0000000..8a130b6
60801--- /dev/null
60802+++ b/include/linux/gracl.h
60803@@ -0,0 +1,319 @@
60804+#ifndef GR_ACL_H
60805+#define GR_ACL_H
60806+
60807+#include <linux/grdefs.h>
60808+#include <linux/resource.h>
60809+#include <linux/capability.h>
60810+#include <linux/dcache.h>
60811+#include <asm/resource.h>
60812+
60813+/* Major status information */
60814+
60815+#define GR_VERSION "grsecurity 2.9"
60816+#define GRSECURITY_VERSION 0x2900
60817+
60818+enum {
60819+ GR_SHUTDOWN = 0,
60820+ GR_ENABLE = 1,
60821+ GR_SPROLE = 2,
60822+ GR_RELOAD = 3,
60823+ GR_SEGVMOD = 4,
60824+ GR_STATUS = 5,
60825+ GR_UNSPROLE = 6,
60826+ GR_PASSSET = 7,
60827+ GR_SPROLEPAM = 8,
60828+};
60829+
60830+/* Password setup definitions
60831+ * kernel/grhash.c */
60832+enum {
60833+ GR_PW_LEN = 128,
60834+ GR_SALT_LEN = 16,
60835+ GR_SHA_LEN = 32,
60836+};
60837+
60838+enum {
60839+ GR_SPROLE_LEN = 64,
60840+};
60841+
60842+enum {
60843+ GR_NO_GLOB = 0,
60844+ GR_REG_GLOB,
60845+ GR_CREATE_GLOB
60846+};
60847+
60848+#define GR_NLIMITS 32
60849+
60850+/* Begin Data Structures */
60851+
60852+struct sprole_pw {
60853+ unsigned char *rolename;
60854+ unsigned char salt[GR_SALT_LEN];
60855+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60856+};
60857+
60858+struct name_entry {
60859+ __u32 key;
60860+ ino_t inode;
60861+ dev_t device;
60862+ char *name;
60863+ __u16 len;
60864+ __u8 deleted;
60865+ struct name_entry *prev;
60866+ struct name_entry *next;
60867+};
60868+
60869+struct inodev_entry {
60870+ struct name_entry *nentry;
60871+ struct inodev_entry *prev;
60872+ struct inodev_entry *next;
60873+};
60874+
60875+struct acl_role_db {
60876+ struct acl_role_label **r_hash;
60877+ __u32 r_size;
60878+};
60879+
60880+struct inodev_db {
60881+ struct inodev_entry **i_hash;
60882+ __u32 i_size;
60883+};
60884+
60885+struct name_db {
60886+ struct name_entry **n_hash;
60887+ __u32 n_size;
60888+};
60889+
60890+struct crash_uid {
60891+ uid_t uid;
60892+ unsigned long expires;
60893+};
60894+
60895+struct gr_hash_struct {
60896+ void **table;
60897+ void **nametable;
60898+ void *first;
60899+ __u32 table_size;
60900+ __u32 used_size;
60901+ int type;
60902+};
60903+
60904+/* Userspace Grsecurity ACL data structures */
60905+
60906+struct acl_subject_label {
60907+ char *filename;
60908+ ino_t inode;
60909+ dev_t device;
60910+ __u32 mode;
60911+ kernel_cap_t cap_mask;
60912+ kernel_cap_t cap_lower;
60913+ kernel_cap_t cap_invert_audit;
60914+
60915+ struct rlimit res[GR_NLIMITS];
60916+ __u32 resmask;
60917+
60918+ __u8 user_trans_type;
60919+ __u8 group_trans_type;
60920+ uid_t *user_transitions;
60921+ gid_t *group_transitions;
60922+ __u16 user_trans_num;
60923+ __u16 group_trans_num;
60924+
60925+ __u32 sock_families[2];
60926+ __u32 ip_proto[8];
60927+ __u32 ip_type;
60928+ struct acl_ip_label **ips;
60929+ __u32 ip_num;
60930+ __u32 inaddr_any_override;
60931+
60932+ __u32 crashes;
60933+ unsigned long expires;
60934+
60935+ struct acl_subject_label *parent_subject;
60936+ struct gr_hash_struct *hash;
60937+ struct acl_subject_label *prev;
60938+ struct acl_subject_label *next;
60939+
60940+ struct acl_object_label **obj_hash;
60941+ __u32 obj_hash_size;
60942+ __u16 pax_flags;
60943+};
60944+
60945+struct role_allowed_ip {
60946+ __u32 addr;
60947+ __u32 netmask;
60948+
60949+ struct role_allowed_ip *prev;
60950+ struct role_allowed_ip *next;
60951+};
60952+
60953+struct role_transition {
60954+ char *rolename;
60955+
60956+ struct role_transition *prev;
60957+ struct role_transition *next;
60958+};
60959+
60960+struct acl_role_label {
60961+ char *rolename;
60962+ uid_t uidgid;
60963+ __u16 roletype;
60964+
60965+ __u16 auth_attempts;
60966+ unsigned long expires;
60967+
60968+ struct acl_subject_label *root_label;
60969+ struct gr_hash_struct *hash;
60970+
60971+ struct acl_role_label *prev;
60972+ struct acl_role_label *next;
60973+
60974+ struct role_transition *transitions;
60975+ struct role_allowed_ip *allowed_ips;
60976+ uid_t *domain_children;
60977+ __u16 domain_child_num;
60978+
60979+ umode_t umask;
60980+
60981+ struct acl_subject_label **subj_hash;
60982+ __u32 subj_hash_size;
60983+};
60984+
60985+struct user_acl_role_db {
60986+ struct acl_role_label **r_table;
60987+ __u32 num_pointers; /* Number of allocations to track */
60988+ __u32 num_roles; /* Number of roles */
60989+ __u32 num_domain_children; /* Number of domain children */
60990+ __u32 num_subjects; /* Number of subjects */
60991+ __u32 num_objects; /* Number of objects */
60992+};
60993+
60994+struct acl_object_label {
60995+ char *filename;
60996+ ino_t inode;
60997+ dev_t device;
60998+ __u32 mode;
60999+
61000+ struct acl_subject_label *nested;
61001+ struct acl_object_label *globbed;
61002+
61003+ /* next two structures not used */
61004+
61005+ struct acl_object_label *prev;
61006+ struct acl_object_label *next;
61007+};
61008+
61009+struct acl_ip_label {
61010+ char *iface;
61011+ __u32 addr;
61012+ __u32 netmask;
61013+ __u16 low, high;
61014+ __u8 mode;
61015+ __u32 type;
61016+ __u32 proto[8];
61017+
61018+ /* next two structures not used */
61019+
61020+ struct acl_ip_label *prev;
61021+ struct acl_ip_label *next;
61022+};
61023+
61024+struct gr_arg {
61025+ struct user_acl_role_db role_db;
61026+ unsigned char pw[GR_PW_LEN];
61027+ unsigned char salt[GR_SALT_LEN];
61028+ unsigned char sum[GR_SHA_LEN];
61029+ unsigned char sp_role[GR_SPROLE_LEN];
61030+ struct sprole_pw *sprole_pws;
61031+ dev_t segv_device;
61032+ ino_t segv_inode;
61033+ uid_t segv_uid;
61034+ __u16 num_sprole_pws;
61035+ __u16 mode;
61036+};
61037+
61038+struct gr_arg_wrapper {
61039+ struct gr_arg *arg;
61040+ __u32 version;
61041+ __u32 size;
61042+};
61043+
61044+struct subject_map {
61045+ struct acl_subject_label *user;
61046+ struct acl_subject_label *kernel;
61047+ struct subject_map *prev;
61048+ struct subject_map *next;
61049+};
61050+
61051+struct acl_subj_map_db {
61052+ struct subject_map **s_hash;
61053+ __u32 s_size;
61054+};
61055+
61056+/* End Data Structures Section */
61057+
61058+/* Hash functions generated by empirical testing by Brad Spengler
61059+ Makes good use of the low bits of the inode. Generally 0-1 times
61060+ in loop for successful match. 0-3 for unsuccessful match.
61061+ Shift/add algorithm with modulus of table size and an XOR*/
61062+
61063+static __inline__ unsigned int
61064+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
61065+{
61066+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
61067+}
61068+
61069+ static __inline__ unsigned int
61070+shash(const struct acl_subject_label *userp, const unsigned int sz)
61071+{
61072+ return ((const unsigned long)userp % sz);
61073+}
61074+
61075+static __inline__ unsigned int
61076+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
61077+{
61078+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
61079+}
61080+
61081+static __inline__ unsigned int
61082+nhash(const char *name, const __u16 len, const unsigned int sz)
61083+{
61084+ return full_name_hash((const unsigned char *)name, len) % sz;
61085+}
61086+
61087+#define FOR_EACH_ROLE_START(role) \
61088+ role = role_list; \
61089+ while (role) {
61090+
61091+#define FOR_EACH_ROLE_END(role) \
61092+ role = role->prev; \
61093+ }
61094+
61095+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
61096+ subj = NULL; \
61097+ iter = 0; \
61098+ while (iter < role->subj_hash_size) { \
61099+ if (subj == NULL) \
61100+ subj = role->subj_hash[iter]; \
61101+ if (subj == NULL) { \
61102+ iter++; \
61103+ continue; \
61104+ }
61105+
61106+#define FOR_EACH_SUBJECT_END(subj,iter) \
61107+ subj = subj->next; \
61108+ if (subj == NULL) \
61109+ iter++; \
61110+ }
61111+
61112+
61113+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
61114+ subj = role->hash->first; \
61115+ while (subj != NULL) {
61116+
61117+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
61118+ subj = subj->next; \
61119+ }
61120+
61121+#endif
61122+
61123diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
61124new file mode 100644
61125index 0000000..323ecf2
61126--- /dev/null
61127+++ b/include/linux/gralloc.h
61128@@ -0,0 +1,9 @@
61129+#ifndef __GRALLOC_H
61130+#define __GRALLOC_H
61131+
61132+void acl_free_all(void);
61133+int acl_alloc_stack_init(unsigned long size);
61134+void *acl_alloc(unsigned long len);
61135+void *acl_alloc_num(unsigned long num, unsigned long len);
61136+
61137+#endif
61138diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
61139new file mode 100644
61140index 0000000..b30e9bc
61141--- /dev/null
61142+++ b/include/linux/grdefs.h
61143@@ -0,0 +1,140 @@
61144+#ifndef GRDEFS_H
61145+#define GRDEFS_H
61146+
61147+/* Begin grsecurity status declarations */
61148+
61149+enum {
61150+ GR_READY = 0x01,
61151+ GR_STATUS_INIT = 0x00 // disabled state
61152+};
61153+
61154+/* Begin ACL declarations */
61155+
61156+/* Role flags */
61157+
61158+enum {
61159+ GR_ROLE_USER = 0x0001,
61160+ GR_ROLE_GROUP = 0x0002,
61161+ GR_ROLE_DEFAULT = 0x0004,
61162+ GR_ROLE_SPECIAL = 0x0008,
61163+ GR_ROLE_AUTH = 0x0010,
61164+ GR_ROLE_NOPW = 0x0020,
61165+ GR_ROLE_GOD = 0x0040,
61166+ GR_ROLE_LEARN = 0x0080,
61167+ GR_ROLE_TPE = 0x0100,
61168+ GR_ROLE_DOMAIN = 0x0200,
61169+ GR_ROLE_PAM = 0x0400,
61170+ GR_ROLE_PERSIST = 0x0800
61171+};
61172+
61173+/* ACL Subject and Object mode flags */
61174+enum {
61175+ GR_DELETED = 0x80000000
61176+};
61177+
61178+/* ACL Object-only mode flags */
61179+enum {
61180+ GR_READ = 0x00000001,
61181+ GR_APPEND = 0x00000002,
61182+ GR_WRITE = 0x00000004,
61183+ GR_EXEC = 0x00000008,
61184+ GR_FIND = 0x00000010,
61185+ GR_INHERIT = 0x00000020,
61186+ GR_SETID = 0x00000040,
61187+ GR_CREATE = 0x00000080,
61188+ GR_DELETE = 0x00000100,
61189+ GR_LINK = 0x00000200,
61190+ GR_AUDIT_READ = 0x00000400,
61191+ GR_AUDIT_APPEND = 0x00000800,
61192+ GR_AUDIT_WRITE = 0x00001000,
61193+ GR_AUDIT_EXEC = 0x00002000,
61194+ GR_AUDIT_FIND = 0x00004000,
61195+ GR_AUDIT_INHERIT= 0x00008000,
61196+ GR_AUDIT_SETID = 0x00010000,
61197+ GR_AUDIT_CREATE = 0x00020000,
61198+ GR_AUDIT_DELETE = 0x00040000,
61199+ GR_AUDIT_LINK = 0x00080000,
61200+ GR_PTRACERD = 0x00100000,
61201+ GR_NOPTRACE = 0x00200000,
61202+ GR_SUPPRESS = 0x00400000,
61203+ GR_NOLEARN = 0x00800000,
61204+ GR_INIT_TRANSFER= 0x01000000
61205+};
61206+
61207+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
61208+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
61209+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
61210+
61211+/* ACL subject-only mode flags */
61212+enum {
61213+ GR_KILL = 0x00000001,
61214+ GR_VIEW = 0x00000002,
61215+ GR_PROTECTED = 0x00000004,
61216+ GR_LEARN = 0x00000008,
61217+ GR_OVERRIDE = 0x00000010,
61218+ /* just a placeholder, this mode is only used in userspace */
61219+ GR_DUMMY = 0x00000020,
61220+ GR_PROTSHM = 0x00000040,
61221+ GR_KILLPROC = 0x00000080,
61222+ GR_KILLIPPROC = 0x00000100,
61223+ /* just a placeholder, this mode is only used in userspace */
61224+ GR_NOTROJAN = 0x00000200,
61225+ GR_PROTPROCFD = 0x00000400,
61226+ GR_PROCACCT = 0x00000800,
61227+ GR_RELAXPTRACE = 0x00001000,
61228+ GR_NESTED = 0x00002000,
61229+ GR_INHERITLEARN = 0x00004000,
61230+ GR_PROCFIND = 0x00008000,
61231+ GR_POVERRIDE = 0x00010000,
61232+ GR_KERNELAUTH = 0x00020000,
61233+ GR_ATSECURE = 0x00040000,
61234+ GR_SHMEXEC = 0x00080000
61235+};
61236+
61237+enum {
61238+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
61239+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
61240+ GR_PAX_ENABLE_MPROTECT = 0x0004,
61241+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
61242+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
61243+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
61244+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
61245+ GR_PAX_DISABLE_MPROTECT = 0x0400,
61246+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
61247+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
61248+};
61249+
61250+enum {
61251+ GR_ID_USER = 0x01,
61252+ GR_ID_GROUP = 0x02,
61253+};
61254+
61255+enum {
61256+ GR_ID_ALLOW = 0x01,
61257+ GR_ID_DENY = 0x02,
61258+};
61259+
61260+#define GR_CRASH_RES 31
61261+#define GR_UIDTABLE_MAX 500
61262+
61263+/* begin resource learning section */
61264+enum {
61265+ GR_RLIM_CPU_BUMP = 60,
61266+ GR_RLIM_FSIZE_BUMP = 50000,
61267+ GR_RLIM_DATA_BUMP = 10000,
61268+ GR_RLIM_STACK_BUMP = 1000,
61269+ GR_RLIM_CORE_BUMP = 10000,
61270+ GR_RLIM_RSS_BUMP = 500000,
61271+ GR_RLIM_NPROC_BUMP = 1,
61272+ GR_RLIM_NOFILE_BUMP = 5,
61273+ GR_RLIM_MEMLOCK_BUMP = 50000,
61274+ GR_RLIM_AS_BUMP = 500000,
61275+ GR_RLIM_LOCKS_BUMP = 2,
61276+ GR_RLIM_SIGPENDING_BUMP = 5,
61277+ GR_RLIM_MSGQUEUE_BUMP = 10000,
61278+ GR_RLIM_NICE_BUMP = 1,
61279+ GR_RLIM_RTPRIO_BUMP = 1,
61280+ GR_RLIM_RTTIME_BUMP = 1000000
61281+};
61282+
61283+#endif
61284diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
61285new file mode 100644
61286index 0000000..da390f1
61287--- /dev/null
61288+++ b/include/linux/grinternal.h
61289@@ -0,0 +1,221 @@
61290+#ifndef __GRINTERNAL_H
61291+#define __GRINTERNAL_H
61292+
61293+#ifdef CONFIG_GRKERNSEC
61294+
61295+#include <linux/fs.h>
61296+#include <linux/mnt_namespace.h>
61297+#include <linux/nsproxy.h>
61298+#include <linux/gracl.h>
61299+#include <linux/grdefs.h>
61300+#include <linux/grmsg.h>
61301+
61302+void gr_add_learn_entry(const char *fmt, ...)
61303+ __attribute__ ((format (printf, 1, 2)));
61304+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
61305+ const struct vfsmount *mnt);
61306+__u32 gr_check_create(const struct dentry *new_dentry,
61307+ const struct dentry *parent,
61308+ const struct vfsmount *mnt, const __u32 mode);
61309+int gr_check_protected_task(const struct task_struct *task);
61310+__u32 to_gr_audit(const __u32 reqmode);
61311+int gr_set_acls(const int type);
61312+int gr_apply_subject_to_task(struct task_struct *task);
61313+int gr_acl_is_enabled(void);
61314+char gr_roletype_to_char(void);
61315+
61316+void gr_handle_alertkill(struct task_struct *task);
61317+char *gr_to_filename(const struct dentry *dentry,
61318+ const struct vfsmount *mnt);
61319+char *gr_to_filename1(const struct dentry *dentry,
61320+ const struct vfsmount *mnt);
61321+char *gr_to_filename2(const struct dentry *dentry,
61322+ const struct vfsmount *mnt);
61323+char *gr_to_filename3(const struct dentry *dentry,
61324+ const struct vfsmount *mnt);
61325+
61326+extern int grsec_enable_ptrace_readexec;
61327+extern int grsec_enable_harden_ptrace;
61328+extern int grsec_enable_link;
61329+extern int grsec_enable_fifo;
61330+extern int grsec_enable_execve;
61331+extern int grsec_enable_shm;
61332+extern int grsec_enable_execlog;
61333+extern int grsec_enable_signal;
61334+extern int grsec_enable_audit_ptrace;
61335+extern int grsec_enable_forkfail;
61336+extern int grsec_enable_time;
61337+extern int grsec_enable_rofs;
61338+extern int grsec_enable_chroot_shmat;
61339+extern int grsec_enable_chroot_mount;
61340+extern int grsec_enable_chroot_double;
61341+extern int grsec_enable_chroot_pivot;
61342+extern int grsec_enable_chroot_chdir;
61343+extern int grsec_enable_chroot_chmod;
61344+extern int grsec_enable_chroot_mknod;
61345+extern int grsec_enable_chroot_fchdir;
61346+extern int grsec_enable_chroot_nice;
61347+extern int grsec_enable_chroot_execlog;
61348+extern int grsec_enable_chroot_caps;
61349+extern int grsec_enable_chroot_sysctl;
61350+extern int grsec_enable_chroot_unix;
61351+extern int grsec_enable_tpe;
61352+extern int grsec_tpe_gid;
61353+extern int grsec_enable_tpe_all;
61354+extern int grsec_enable_tpe_invert;
61355+extern int grsec_enable_socket_all;
61356+extern int grsec_socket_all_gid;
61357+extern int grsec_enable_socket_client;
61358+extern int grsec_socket_client_gid;
61359+extern int grsec_enable_socket_server;
61360+extern int grsec_socket_server_gid;
61361+extern int grsec_audit_gid;
61362+extern int grsec_enable_group;
61363+extern int grsec_enable_audit_textrel;
61364+extern int grsec_enable_log_rwxmaps;
61365+extern int grsec_enable_mount;
61366+extern int grsec_enable_chdir;
61367+extern int grsec_resource_logging;
61368+extern int grsec_enable_blackhole;
61369+extern int grsec_lastack_retries;
61370+extern int grsec_enable_brute;
61371+extern int grsec_lock;
61372+
61373+extern spinlock_t grsec_alert_lock;
61374+extern unsigned long grsec_alert_wtime;
61375+extern unsigned long grsec_alert_fyet;
61376+
61377+extern spinlock_t grsec_audit_lock;
61378+
61379+extern rwlock_t grsec_exec_file_lock;
61380+
61381+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
61382+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
61383+ (tsk)->exec_file->f_vfsmnt) : "/")
61384+
61385+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
61386+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
61387+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61388+
61389+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
61390+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
61391+ (tsk)->exec_file->f_vfsmnt) : "/")
61392+
61393+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
61394+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
61395+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61396+
61397+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
61398+
61399+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
61400+
61401+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
61402+ (task)->pid, (cred)->uid, \
61403+ (cred)->euid, (cred)->gid, (cred)->egid, \
61404+ gr_parent_task_fullpath(task), \
61405+ (task)->real_parent->comm, (task)->real_parent->pid, \
61406+ (pcred)->uid, (pcred)->euid, \
61407+ (pcred)->gid, (pcred)->egid
61408+
61409+#define GR_CHROOT_CAPS {{ \
61410+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
61411+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
61412+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
61413+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
61414+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
61415+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
61416+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
61417+
61418+#define security_learn(normal_msg,args...) \
61419+({ \
61420+ read_lock(&grsec_exec_file_lock); \
61421+ gr_add_learn_entry(normal_msg "\n", ## args); \
61422+ read_unlock(&grsec_exec_file_lock); \
61423+})
61424+
61425+enum {
61426+ GR_DO_AUDIT,
61427+ GR_DONT_AUDIT,
61428+ /* used for non-audit messages that we shouldn't kill the task on */
61429+ GR_DONT_AUDIT_GOOD
61430+};
61431+
61432+enum {
61433+ GR_TTYSNIFF,
61434+ GR_RBAC,
61435+ GR_RBAC_STR,
61436+ GR_STR_RBAC,
61437+ GR_RBAC_MODE2,
61438+ GR_RBAC_MODE3,
61439+ GR_FILENAME,
61440+ GR_SYSCTL_HIDDEN,
61441+ GR_NOARGS,
61442+ GR_ONE_INT,
61443+ GR_ONE_INT_TWO_STR,
61444+ GR_ONE_STR,
61445+ GR_STR_INT,
61446+ GR_TWO_STR_INT,
61447+ GR_TWO_INT,
61448+ GR_TWO_U64,
61449+ GR_THREE_INT,
61450+ GR_FIVE_INT_TWO_STR,
61451+ GR_TWO_STR,
61452+ GR_THREE_STR,
61453+ GR_FOUR_STR,
61454+ GR_STR_FILENAME,
61455+ GR_FILENAME_STR,
61456+ GR_FILENAME_TWO_INT,
61457+ GR_FILENAME_TWO_INT_STR,
61458+ GR_TEXTREL,
61459+ GR_PTRACE,
61460+ GR_RESOURCE,
61461+ GR_CAP,
61462+ GR_SIG,
61463+ GR_SIG2,
61464+ GR_CRASH1,
61465+ GR_CRASH2,
61466+ GR_PSACCT,
61467+ GR_RWXMAP
61468+};
61469+
61470+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
61471+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
61472+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
61473+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
61474+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
61475+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
61476+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
61477+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
61478+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
61479+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
61480+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
61481+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
61482+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
61483+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
61484+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
61485+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
61486+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
61487+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
61488+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
61489+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
61490+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
61491+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
61492+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
61493+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
61494+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
61495+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
61496+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
61497+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
61498+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
61499+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
61500+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
61501+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
61502+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
61503+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
61504+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
61505+
61506+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
61507+
61508+#endif
61509+
61510+#endif
61511diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
61512new file mode 100644
61513index 0000000..ae576a1
61514--- /dev/null
61515+++ b/include/linux/grmsg.h
61516@@ -0,0 +1,109 @@
61517+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
61518+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
61519+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
61520+#define GR_STOPMOD_MSG "denied modification of module state by "
61521+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
61522+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
61523+#define GR_IOPERM_MSG "denied use of ioperm() by "
61524+#define GR_IOPL_MSG "denied use of iopl() by "
61525+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
61526+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
61527+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
61528+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
61529+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
61530+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
61531+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
61532+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
61533+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
61534+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
61535+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
61536+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
61537+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
61538+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
61539+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
61540+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
61541+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
61542+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
61543+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
61544+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
61545+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
61546+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
61547+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
61548+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
61549+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
61550+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
61551+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
61552+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
61553+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
61554+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
61555+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
61556+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
61557+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
61558+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
61559+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
61560+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
61561+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
61562+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
61563+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
61564+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
61565+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
61566+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
61567+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
61568+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
61569+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
61570+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
61571+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
61572+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
61573+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
61574+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
61575+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
61576+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
61577+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
61578+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
61579+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
61580+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
61581+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
61582+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
61583+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
61584+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
61585+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
61586+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
61587+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
61588+#define GR_FAILFORK_MSG "failed fork with errno %s by "
61589+#define GR_NICE_CHROOT_MSG "denied priority change by "
61590+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
61591+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
61592+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
61593+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
61594+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
61595+#define GR_TIME_MSG "time set by "
61596+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
61597+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
61598+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
61599+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
61600+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
61601+#define GR_BIND_MSG "denied bind() by "
61602+#define GR_CONNECT_MSG "denied connect() by "
61603+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
61604+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
61605+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
61606+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
61607+#define GR_CAP_ACL_MSG "use of %s denied for "
61608+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
61609+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
61610+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
61611+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
61612+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
61613+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
61614+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
61615+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
61616+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
61617+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
61618+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
61619+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
61620+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
61621+#define GR_VM86_MSG "denied use of vm86 by "
61622+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
61623+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
61624+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
61625+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
61626diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
61627new file mode 100644
61628index 0000000..acd05db
61629--- /dev/null
61630+++ b/include/linux/grsecurity.h
61631@@ -0,0 +1,232 @@
61632+#ifndef GR_SECURITY_H
61633+#define GR_SECURITY_H
61634+#include <linux/fs.h>
61635+#include <linux/fs_struct.h>
61636+#include <linux/binfmts.h>
61637+#include <linux/gracl.h>
61638+
61639+/* notify of brain-dead configs */
61640+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61641+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
61642+#endif
61643+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
61644+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
61645+#endif
61646+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
61647+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
61648+#endif
61649+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
61650+#error "CONFIG_PAX enabled, but no PaX options are enabled."
61651+#endif
61652+
61653+#include <linux/compat.h>
61654+
61655+struct user_arg_ptr {
61656+#ifdef CONFIG_COMPAT
61657+ bool is_compat;
61658+#endif
61659+ union {
61660+ const char __user *const __user *native;
61661+#ifdef CONFIG_COMPAT
61662+ compat_uptr_t __user *compat;
61663+#endif
61664+ } ptr;
61665+};
61666+
61667+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
61668+void gr_handle_brute_check(void);
61669+void gr_handle_kernel_exploit(void);
61670+int gr_process_user_ban(void);
61671+
61672+char gr_roletype_to_char(void);
61673+
61674+int gr_acl_enable_at_secure(void);
61675+
61676+int gr_check_user_change(int real, int effective, int fs);
61677+int gr_check_group_change(int real, int effective, int fs);
61678+
61679+void gr_del_task_from_ip_table(struct task_struct *p);
61680+
61681+int gr_pid_is_chrooted(struct task_struct *p);
61682+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
61683+int gr_handle_chroot_nice(void);
61684+int gr_handle_chroot_sysctl(const int op);
61685+int gr_handle_chroot_setpriority(struct task_struct *p,
61686+ const int niceval);
61687+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
61688+int gr_handle_chroot_chroot(const struct dentry *dentry,
61689+ const struct vfsmount *mnt);
61690+void gr_handle_chroot_chdir(struct path *path);
61691+int gr_handle_chroot_chmod(const struct dentry *dentry,
61692+ const struct vfsmount *mnt, const int mode);
61693+int gr_handle_chroot_mknod(const struct dentry *dentry,
61694+ const struct vfsmount *mnt, const int mode);
61695+int gr_handle_chroot_mount(const struct dentry *dentry,
61696+ const struct vfsmount *mnt,
61697+ const char *dev_name);
61698+int gr_handle_chroot_pivot(void);
61699+int gr_handle_chroot_unix(const pid_t pid);
61700+
61701+int gr_handle_rawio(const struct inode *inode);
61702+
61703+void gr_handle_ioperm(void);
61704+void gr_handle_iopl(void);
61705+
61706+umode_t gr_acl_umask(void);
61707+
61708+int gr_tpe_allow(const struct file *file);
61709+
61710+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
61711+void gr_clear_chroot_entries(struct task_struct *task);
61712+
61713+void gr_log_forkfail(const int retval);
61714+void gr_log_timechange(void);
61715+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
61716+void gr_log_chdir(const struct dentry *dentry,
61717+ const struct vfsmount *mnt);
61718+void gr_log_chroot_exec(const struct dentry *dentry,
61719+ const struct vfsmount *mnt);
61720+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
61721+void gr_log_remount(const char *devname, const int retval);
61722+void gr_log_unmount(const char *devname, const int retval);
61723+void gr_log_mount(const char *from, const char *to, const int retval);
61724+void gr_log_textrel(struct vm_area_struct *vma);
61725+void gr_log_rwxmmap(struct file *file);
61726+void gr_log_rwxmprotect(struct file *file);
61727+
61728+int gr_handle_follow_link(const struct inode *parent,
61729+ const struct inode *inode,
61730+ const struct dentry *dentry,
61731+ const struct vfsmount *mnt);
61732+int gr_handle_fifo(const struct dentry *dentry,
61733+ const struct vfsmount *mnt,
61734+ const struct dentry *dir, const int flag,
61735+ const int acc_mode);
61736+int gr_handle_hardlink(const struct dentry *dentry,
61737+ const struct vfsmount *mnt,
61738+ struct inode *inode,
61739+ const int mode, const char *to);
61740+
61741+int gr_is_capable(const int cap);
61742+int gr_is_capable_nolog(const int cap);
61743+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
61744+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
61745+
61746+void gr_learn_resource(const struct task_struct *task, const int limit,
61747+ const unsigned long wanted, const int gt);
61748+void gr_copy_label(struct task_struct *tsk);
61749+void gr_handle_crash(struct task_struct *task, const int sig);
61750+int gr_handle_signal(const struct task_struct *p, const int sig);
61751+int gr_check_crash_uid(const uid_t uid);
61752+int gr_check_protected_task(const struct task_struct *task);
61753+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
61754+int gr_acl_handle_mmap(const struct file *file,
61755+ const unsigned long prot);
61756+int gr_acl_handle_mprotect(const struct file *file,
61757+ const unsigned long prot);
61758+int gr_check_hidden_task(const struct task_struct *tsk);
61759+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61760+ const struct vfsmount *mnt);
61761+__u32 gr_acl_handle_utime(const struct dentry *dentry,
61762+ const struct vfsmount *mnt);
61763+__u32 gr_acl_handle_access(const struct dentry *dentry,
61764+ const struct vfsmount *mnt, const int fmode);
61765+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61766+ const struct vfsmount *mnt, umode_t *mode);
61767+__u32 gr_acl_handle_chown(const struct dentry *dentry,
61768+ const struct vfsmount *mnt);
61769+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61770+ const struct vfsmount *mnt);
61771+int gr_handle_ptrace(struct task_struct *task, const long request);
61772+int gr_handle_proc_ptrace(struct task_struct *task);
61773+__u32 gr_acl_handle_execve(const struct dentry *dentry,
61774+ const struct vfsmount *mnt);
61775+int gr_check_crash_exec(const struct file *filp);
61776+int gr_acl_is_enabled(void);
61777+void gr_set_kernel_label(struct task_struct *task);
61778+void gr_set_role_label(struct task_struct *task, const uid_t uid,
61779+ const gid_t gid);
61780+int gr_set_proc_label(const struct dentry *dentry,
61781+ const struct vfsmount *mnt,
61782+ const int unsafe_flags);
61783+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61784+ const struct vfsmount *mnt);
61785+__u32 gr_acl_handle_open(const struct dentry *dentry,
61786+ const struct vfsmount *mnt, int acc_mode);
61787+__u32 gr_acl_handle_creat(const struct dentry *dentry,
61788+ const struct dentry *p_dentry,
61789+ const struct vfsmount *p_mnt,
61790+ int open_flags, int acc_mode, const int imode);
61791+void gr_handle_create(const struct dentry *dentry,
61792+ const struct vfsmount *mnt);
61793+void gr_handle_proc_create(const struct dentry *dentry,
61794+ const struct inode *inode);
61795+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61796+ const struct dentry *parent_dentry,
61797+ const struct vfsmount *parent_mnt,
61798+ const int mode);
61799+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61800+ const struct dentry *parent_dentry,
61801+ const struct vfsmount *parent_mnt);
61802+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61803+ const struct vfsmount *mnt);
61804+void gr_handle_delete(const ino_t ino, const dev_t dev);
61805+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61806+ const struct vfsmount *mnt);
61807+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61808+ const struct dentry *parent_dentry,
61809+ const struct vfsmount *parent_mnt,
61810+ const char *from);
61811+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61812+ const struct dentry *parent_dentry,
61813+ const struct vfsmount *parent_mnt,
61814+ const struct dentry *old_dentry,
61815+ const struct vfsmount *old_mnt, const char *to);
61816+int gr_acl_handle_rename(struct dentry *new_dentry,
61817+ struct dentry *parent_dentry,
61818+ const struct vfsmount *parent_mnt,
61819+ struct dentry *old_dentry,
61820+ struct inode *old_parent_inode,
61821+ struct vfsmount *old_mnt, const char *newname);
61822+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61823+ struct dentry *old_dentry,
61824+ struct dentry *new_dentry,
61825+ struct vfsmount *mnt, const __u8 replace);
61826+__u32 gr_check_link(const struct dentry *new_dentry,
61827+ const struct dentry *parent_dentry,
61828+ const struct vfsmount *parent_mnt,
61829+ const struct dentry *old_dentry,
61830+ const struct vfsmount *old_mnt);
61831+int gr_acl_handle_filldir(const struct file *file, const char *name,
61832+ const unsigned int namelen, const ino_t ino);
61833+
61834+__u32 gr_acl_handle_unix(const struct dentry *dentry,
61835+ const struct vfsmount *mnt);
61836+void gr_acl_handle_exit(void);
61837+void gr_acl_handle_psacct(struct task_struct *task, const long code);
61838+int gr_acl_handle_procpidmem(const struct task_struct *task);
61839+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61840+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61841+void gr_audit_ptrace(struct task_struct *task);
61842+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61843+
61844+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
61845+
61846+#ifdef CONFIG_GRKERNSEC
61847+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61848+void gr_handle_vm86(void);
61849+void gr_handle_mem_readwrite(u64 from, u64 to);
61850+
61851+void gr_log_badprocpid(const char *entry);
61852+
61853+extern int grsec_enable_dmesg;
61854+extern int grsec_disable_privio;
61855+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61856+extern int grsec_enable_chroot_findtask;
61857+#endif
61858+#ifdef CONFIG_GRKERNSEC_SETXID
61859+extern int grsec_enable_setxid;
61860+#endif
61861+#endif
61862+
61863+#endif
61864diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61865new file mode 100644
61866index 0000000..e7ffaaf
61867--- /dev/null
61868+++ b/include/linux/grsock.h
61869@@ -0,0 +1,19 @@
61870+#ifndef __GRSOCK_H
61871+#define __GRSOCK_H
61872+
61873+extern void gr_attach_curr_ip(const struct sock *sk);
61874+extern int gr_handle_sock_all(const int family, const int type,
61875+ const int protocol);
61876+extern int gr_handle_sock_server(const struct sockaddr *sck);
61877+extern int gr_handle_sock_server_other(const struct sock *sck);
61878+extern int gr_handle_sock_client(const struct sockaddr *sck);
61879+extern int gr_search_connect(struct socket * sock,
61880+ struct sockaddr_in * addr);
61881+extern int gr_search_bind(struct socket * sock,
61882+ struct sockaddr_in * addr);
61883+extern int gr_search_listen(struct socket * sock);
61884+extern int gr_search_accept(struct socket * sock);
61885+extern int gr_search_socket(const int domain, const int type,
61886+ const int protocol);
61887+
61888+#endif
61889diff --git a/include/linux/hid.h b/include/linux/hid.h
61890index 3a95da6..51986f1 100644
61891--- a/include/linux/hid.h
61892+++ b/include/linux/hid.h
61893@@ -696,7 +696,7 @@ struct hid_ll_driver {
61894 unsigned int code, int value);
61895
61896 int (*parse)(struct hid_device *hdev);
61897-};
61898+} __no_const;
61899
61900 #define PM_HINT_FULLON 1<<5
61901 #define PM_HINT_NORMAL 1<<1
61902diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61903index 3a93f73..b19d0b3 100644
61904--- a/include/linux/highmem.h
61905+++ b/include/linux/highmem.h
61906@@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
61907 kunmap_atomic(kaddr, KM_USER0);
61908 }
61909
61910+static inline void sanitize_highpage(struct page *page)
61911+{
61912+ void *kaddr;
61913+ unsigned long flags;
61914+
61915+ local_irq_save(flags);
61916+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
61917+ clear_page(kaddr);
61918+ kunmap_atomic(kaddr, KM_CLEARPAGE);
61919+ local_irq_restore(flags);
61920+}
61921+
61922 static inline void zero_user_segments(struct page *page,
61923 unsigned start1, unsigned end1,
61924 unsigned start2, unsigned end2)
61925diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61926index 8e25a91..551b161 100644
61927--- a/include/linux/i2c.h
61928+++ b/include/linux/i2c.h
61929@@ -364,6 +364,7 @@ struct i2c_algorithm {
61930 /* To determine what the adapter supports */
61931 u32 (*functionality) (struct i2c_adapter *);
61932 };
61933+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61934
61935 /*
61936 * i2c_adapter is the structure used to identify a physical i2c bus along
61937diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61938index a6deef4..c56a7f2 100644
61939--- a/include/linux/i2o.h
61940+++ b/include/linux/i2o.h
61941@@ -564,7 +564,7 @@ struct i2o_controller {
61942 struct i2o_device *exec; /* Executive */
61943 #if BITS_PER_LONG == 64
61944 spinlock_t context_list_lock; /* lock for context_list */
61945- atomic_t context_list_counter; /* needed for unique contexts */
61946+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61947 struct list_head context_list; /* list of context id's
61948 and pointers */
61949 #endif
61950diff --git a/include/linux/if_team.h b/include/linux/if_team.h
61951index 58404b0..439ed95 100644
61952--- a/include/linux/if_team.h
61953+++ b/include/linux/if_team.h
61954@@ -64,6 +64,7 @@ struct team_mode_ops {
61955 void (*port_leave)(struct team *team, struct team_port *port);
61956 void (*port_change_mac)(struct team *team, struct team_port *port);
61957 };
61958+typedef struct team_mode_ops __no_const team_mode_ops_no_const;
61959
61960 enum team_option_type {
61961 TEAM_OPTION_TYPE_U32,
61962@@ -112,7 +113,7 @@ struct team {
61963 struct list_head option_list;
61964
61965 const struct team_mode *mode;
61966- struct team_mode_ops ops;
61967+ team_mode_ops_no_const ops;
61968 long mode_priv[TEAM_MODE_PRIV_LONGS];
61969 };
61970
61971diff --git a/include/linux/init.h b/include/linux/init.h
61972index 6b95109..4aca62c 100644
61973--- a/include/linux/init.h
61974+++ b/include/linux/init.h
61975@@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
61976
61977 /* Each module must use one module_init(). */
61978 #define module_init(initfn) \
61979- static inline initcall_t __inittest(void) \
61980+ static inline __used initcall_t __inittest(void) \
61981 { return initfn; } \
61982 int init_module(void) __attribute__((alias(#initfn)));
61983
61984 /* This is only required if you want to be unloadable. */
61985 #define module_exit(exitfn) \
61986- static inline exitcall_t __exittest(void) \
61987+ static inline __used exitcall_t __exittest(void) \
61988 { return exitfn; } \
61989 void cleanup_module(void) __attribute__((alias(#exitfn)));
61990
61991diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61992index 9c66b1a..a3fdded 100644
61993--- a/include/linux/init_task.h
61994+++ b/include/linux/init_task.h
61995@@ -127,6 +127,12 @@ extern struct cred init_cred;
61996
61997 #define INIT_TASK_COMM "swapper"
61998
61999+#ifdef CONFIG_X86
62000+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
62001+#else
62002+#define INIT_TASK_THREAD_INFO
62003+#endif
62004+
62005 /*
62006 * INIT_TASK is used to set up the first task table, touch at
62007 * your own risk!. Base=0, limit=0x1fffff (=2MB)
62008@@ -165,6 +171,7 @@ extern struct cred init_cred;
62009 RCU_INIT_POINTER(.cred, &init_cred), \
62010 .comm = INIT_TASK_COMM, \
62011 .thread = INIT_THREAD, \
62012+ INIT_TASK_THREAD_INFO \
62013 .fs = &init_fs, \
62014 .files = &init_files, \
62015 .signal = &init_signals, \
62016diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
62017index e6ca56d..8583707 100644
62018--- a/include/linux/intel-iommu.h
62019+++ b/include/linux/intel-iommu.h
62020@@ -296,7 +296,7 @@ struct iommu_flush {
62021 u8 fm, u64 type);
62022 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
62023 unsigned int size_order, u64 type);
62024-};
62025+} __no_const;
62026
62027 enum {
62028 SR_DMAR_FECTL_REG,
62029diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
62030index a64b00e..464d8bc 100644
62031--- a/include/linux/interrupt.h
62032+++ b/include/linux/interrupt.h
62033@@ -441,7 +441,7 @@ enum
62034 /* map softirq index to softirq name. update 'softirq_to_name' in
62035 * kernel/softirq.c when adding a new softirq.
62036 */
62037-extern char *softirq_to_name[NR_SOFTIRQS];
62038+extern const char * const softirq_to_name[NR_SOFTIRQS];
62039
62040 /* softirq mask and active fields moved to irq_cpustat_t in
62041 * asm/hardirq.h to get better cache usage. KAO
62042@@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
62043
62044 struct softirq_action
62045 {
62046- void (*action)(struct softirq_action *);
62047+ void (*action)(void);
62048 };
62049
62050 asmlinkage void do_softirq(void);
62051 asmlinkage void __do_softirq(void);
62052-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
62053+extern void open_softirq(int nr, void (*action)(void));
62054 extern void softirq_init(void);
62055 static inline void __raise_softirq_irqoff(unsigned int nr)
62056 {
62057diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
62058index 3875719..4cd454c 100644
62059--- a/include/linux/kallsyms.h
62060+++ b/include/linux/kallsyms.h
62061@@ -15,7 +15,8 @@
62062
62063 struct module;
62064
62065-#ifdef CONFIG_KALLSYMS
62066+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
62067+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62068 /* Lookup the address for a symbol. Returns 0 if not found. */
62069 unsigned long kallsyms_lookup_name(const char *name);
62070
62071@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
62072 /* Stupid that this does nothing, but I didn't create this mess. */
62073 #define __print_symbol(fmt, addr)
62074 #endif /*CONFIG_KALLSYMS*/
62075+#else /* when included by kallsyms.c, vsnprintf.c, or
62076+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
62077+extern void __print_symbol(const char *fmt, unsigned long address);
62078+extern int sprint_backtrace(char *buffer, unsigned long address);
62079+extern int sprint_symbol(char *buffer, unsigned long address);
62080+const char *kallsyms_lookup(unsigned long addr,
62081+ unsigned long *symbolsize,
62082+ unsigned long *offset,
62083+ char **modname, char *namebuf);
62084+#endif
62085
62086 /* This macro allows us to keep printk typechecking */
62087 static __printf(1, 2)
62088diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
62089index fa39183..40160be 100644
62090--- a/include/linux/kgdb.h
62091+++ b/include/linux/kgdb.h
62092@@ -53,7 +53,7 @@ extern int kgdb_connected;
62093 extern int kgdb_io_module_registered;
62094
62095 extern atomic_t kgdb_setting_breakpoint;
62096-extern atomic_t kgdb_cpu_doing_single_step;
62097+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
62098
62099 extern struct task_struct *kgdb_usethread;
62100 extern struct task_struct *kgdb_contthread;
62101@@ -251,7 +251,7 @@ struct kgdb_arch {
62102 void (*disable_hw_break)(struct pt_regs *regs);
62103 void (*remove_all_hw_break)(void);
62104 void (*correct_hw_break)(void);
62105-};
62106+} __do_const;
62107
62108 /**
62109 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
62110@@ -276,7 +276,7 @@ struct kgdb_io {
62111 void (*pre_exception) (void);
62112 void (*post_exception) (void);
62113 int is_console;
62114-};
62115+} __do_const;
62116
62117 extern struct kgdb_arch arch_kgdb_ops;
62118
62119diff --git a/include/linux/kmod.h b/include/linux/kmod.h
62120index 722f477..eef2a27 100644
62121--- a/include/linux/kmod.h
62122+++ b/include/linux/kmod.h
62123@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
62124 * usually useless though. */
62125 extern __printf(2, 3)
62126 int __request_module(bool wait, const char *name, ...);
62127+extern __printf(3, 4)
62128+int ___request_module(bool wait, char *param_name, const char *name, ...);
62129 #define request_module(mod...) __request_module(true, mod)
62130 #define request_module_nowait(mod...) __request_module(false, mod)
62131 #define try_then_request_module(x, mod...) \
62132diff --git a/include/linux/kref.h b/include/linux/kref.h
62133index 9c07dce..a92fa71 100644
62134--- a/include/linux/kref.h
62135+++ b/include/linux/kref.h
62136@@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
62137 static inline int kref_sub(struct kref *kref, unsigned int count,
62138 void (*release)(struct kref *kref))
62139 {
62140- WARN_ON(release == NULL);
62141+ BUG_ON(release == NULL);
62142
62143 if (atomic_sub_and_test((int) count, &kref->refcount)) {
62144 release(kref);
62145diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
62146index 900c763..3287a0b 100644
62147--- a/include/linux/kvm_host.h
62148+++ b/include/linux/kvm_host.h
62149@@ -326,7 +326,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
62150 void vcpu_load(struct kvm_vcpu *vcpu);
62151 void vcpu_put(struct kvm_vcpu *vcpu);
62152
62153-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62154+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62155 struct module *module);
62156 void kvm_exit(void);
62157
62158@@ -416,20 +416,20 @@ void kvm_get_pfn(pfn_t pfn);
62159 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
62160 int len);
62161 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
62162- unsigned long len);
62163-int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
62164+ unsigned long len) __size_overflow(4);
62165+int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) __size_overflow(2,4);
62166 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
62167- void *data, unsigned long len);
62168+ void *data, unsigned long len) __size_overflow(4);
62169 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
62170 int offset, int len);
62171 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
62172- unsigned long len);
62173+ unsigned long len) __size_overflow(2,4);
62174 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
62175- void *data, unsigned long len);
62176+ void *data, unsigned long len) __size_overflow(4);
62177 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
62178 gpa_t gpa);
62179 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
62180-int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
62181+int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) __size_overflow(2,3);
62182 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
62183 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
62184 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
62185@@ -485,7 +485,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
62186 struct kvm_guest_debug *dbg);
62187 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
62188
62189-int kvm_arch_init(void *opaque);
62190+int kvm_arch_init(const void *opaque);
62191 void kvm_arch_exit(void);
62192
62193 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
62194@@ -721,7 +721,7 @@ int kvm_setup_default_irq_routing(struct kvm *kvm);
62195 int kvm_set_irq_routing(struct kvm *kvm,
62196 const struct kvm_irq_routing_entry *entries,
62197 unsigned nr,
62198- unsigned flags);
62199+ unsigned flags) __size_overflow(3);
62200 void kvm_free_irq_routing(struct kvm *kvm);
62201
62202 #else
62203diff --git a/include/linux/libata.h b/include/linux/libata.h
62204index cafc09a..d7e7829 100644
62205--- a/include/linux/libata.h
62206+++ b/include/linux/libata.h
62207@@ -909,7 +909,7 @@ struct ata_port_operations {
62208 * fields must be pointers.
62209 */
62210 const struct ata_port_operations *inherits;
62211-};
62212+} __do_const;
62213
62214 struct ata_port_info {
62215 unsigned long flags;
62216diff --git a/include/linux/mca.h b/include/linux/mca.h
62217index 3797270..7765ede 100644
62218--- a/include/linux/mca.h
62219+++ b/include/linux/mca.h
62220@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
62221 int region);
62222 void * (*mca_transform_memory)(struct mca_device *,
62223 void *memory);
62224-};
62225+} __no_const;
62226
62227 struct mca_bus {
62228 u64 default_dma_mask;
62229diff --git a/include/linux/memory.h b/include/linux/memory.h
62230index 1ac7f6e..a5794d0 100644
62231--- a/include/linux/memory.h
62232+++ b/include/linux/memory.h
62233@@ -143,7 +143,7 @@ struct memory_accessor {
62234 size_t count);
62235 ssize_t (*write)(struct memory_accessor *, const char *buf,
62236 off_t offset, size_t count);
62237-};
62238+} __no_const;
62239
62240 /*
62241 * Kernel text modification mutex, used for code patching. Users of this lock
62242diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
62243index 9970337..9444122 100644
62244--- a/include/linux/mfd/abx500.h
62245+++ b/include/linux/mfd/abx500.h
62246@@ -188,6 +188,7 @@ struct abx500_ops {
62247 int (*event_registers_startup_state_get) (struct device *, u8 *);
62248 int (*startup_irq_enabled) (struct device *, unsigned int);
62249 };
62250+typedef struct abx500_ops __no_const abx500_ops_no_const;
62251
62252 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
62253 void abx500_remove_ops(struct device *dev);
62254diff --git a/include/linux/mm.h b/include/linux/mm.h
62255index 17b27cd..467ba2f 100644
62256--- a/include/linux/mm.h
62257+++ b/include/linux/mm.h
62258@@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
62259
62260 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
62261 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
62262+
62263+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62264+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
62265+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
62266+#else
62267 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
62268+#endif
62269+
62270 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
62271 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
62272
62273@@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
62274 int set_page_dirty_lock(struct page *page);
62275 int clear_page_dirty_for_io(struct page *page);
62276
62277-/* Is the vma a continuation of the stack vma above it? */
62278-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
62279-{
62280- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
62281-}
62282-
62283-static inline int stack_guard_page_start(struct vm_area_struct *vma,
62284- unsigned long addr)
62285-{
62286- return (vma->vm_flags & VM_GROWSDOWN) &&
62287- (vma->vm_start == addr) &&
62288- !vma_growsdown(vma->vm_prev, addr);
62289-}
62290-
62291-/* Is the vma a continuation of the stack vma below it? */
62292-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
62293-{
62294- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
62295-}
62296-
62297-static inline int stack_guard_page_end(struct vm_area_struct *vma,
62298- unsigned long addr)
62299-{
62300- return (vma->vm_flags & VM_GROWSUP) &&
62301- (vma->vm_end == addr) &&
62302- !vma_growsup(vma->vm_next, addr);
62303-}
62304-
62305 extern unsigned long move_page_tables(struct vm_area_struct *vma,
62306 unsigned long old_addr, struct vm_area_struct *new_vma,
62307 unsigned long new_addr, unsigned long len);
62308@@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
62309 }
62310 #endif
62311
62312+#ifdef CONFIG_MMU
62313+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
62314+#else
62315+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
62316+{
62317+ return __pgprot(0);
62318+}
62319+#endif
62320+
62321 int vma_wants_writenotify(struct vm_area_struct *vma);
62322
62323 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
62324@@ -1409,6 +1397,7 @@ out:
62325 }
62326
62327 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
62328+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
62329
62330 extern unsigned long do_brk(unsigned long, unsigned long);
62331
62332@@ -1466,6 +1455,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
62333 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
62334 struct vm_area_struct **pprev);
62335
62336+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
62337+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
62338+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
62339+
62340 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
62341 NULL if none. Assume start_addr < end_addr. */
62342 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
62343@@ -1494,15 +1487,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
62344 return vma;
62345 }
62346
62347-#ifdef CONFIG_MMU
62348-pgprot_t vm_get_page_prot(unsigned long vm_flags);
62349-#else
62350-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
62351-{
62352- return __pgprot(0);
62353-}
62354-#endif
62355-
62356 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
62357 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
62358 unsigned long pfn, unsigned long size, pgprot_t);
62359@@ -1606,7 +1590,7 @@ extern int unpoison_memory(unsigned long pfn);
62360 extern int sysctl_memory_failure_early_kill;
62361 extern int sysctl_memory_failure_recovery;
62362 extern void shake_page(struct page *p, int access);
62363-extern atomic_long_t mce_bad_pages;
62364+extern atomic_long_unchecked_t mce_bad_pages;
62365 extern int soft_offline_page(struct page *page, int flags);
62366
62367 extern void dump_page(struct page *page);
62368@@ -1637,5 +1621,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
62369 static inline bool page_is_guard(struct page *page) { return false; }
62370 #endif /* CONFIG_DEBUG_PAGEALLOC */
62371
62372+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62373+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
62374+#else
62375+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
62376+#endif
62377+
62378 #endif /* __KERNEL__ */
62379 #endif /* _LINUX_MM_H */
62380diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
62381index 3cc3062..efeaeb7 100644
62382--- a/include/linux/mm_types.h
62383+++ b/include/linux/mm_types.h
62384@@ -252,6 +252,8 @@ struct vm_area_struct {
62385 #ifdef CONFIG_NUMA
62386 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
62387 #endif
62388+
62389+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
62390 };
62391
62392 struct core_thread {
62393@@ -326,7 +328,7 @@ struct mm_struct {
62394 unsigned long def_flags;
62395 unsigned long nr_ptes; /* Page table pages */
62396 unsigned long start_code, end_code, start_data, end_data;
62397- unsigned long start_brk, brk, start_stack;
62398+ unsigned long brk_gap, start_brk, brk, start_stack;
62399 unsigned long arg_start, arg_end, env_start, env_end;
62400
62401 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
62402@@ -388,6 +390,24 @@ struct mm_struct {
62403 #ifdef CONFIG_CPUMASK_OFFSTACK
62404 struct cpumask cpumask_allocation;
62405 #endif
62406+
62407+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62408+ unsigned long pax_flags;
62409+#endif
62410+
62411+#ifdef CONFIG_PAX_DLRESOLVE
62412+ unsigned long call_dl_resolve;
62413+#endif
62414+
62415+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
62416+ unsigned long call_syscall;
62417+#endif
62418+
62419+#ifdef CONFIG_PAX_ASLR
62420+ unsigned long delta_mmap; /* randomized offset */
62421+ unsigned long delta_stack; /* randomized offset */
62422+#endif
62423+
62424 };
62425
62426 static inline void mm_init_cpumask(struct mm_struct *mm)
62427diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
62428index 1d1b1e1..2a13c78 100644
62429--- a/include/linux/mmu_notifier.h
62430+++ b/include/linux/mmu_notifier.h
62431@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
62432 */
62433 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
62434 ({ \
62435- pte_t __pte; \
62436+ pte_t ___pte; \
62437 struct vm_area_struct *___vma = __vma; \
62438 unsigned long ___address = __address; \
62439- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
62440+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
62441 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
62442- __pte; \
62443+ ___pte; \
62444 })
62445
62446 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
62447diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
62448index 650ba2f..af0a58c 100644
62449--- a/include/linux/mmzone.h
62450+++ b/include/linux/mmzone.h
62451@@ -379,7 +379,7 @@ struct zone {
62452 unsigned long flags; /* zone flags, see below */
62453
62454 /* Zone statistics */
62455- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62456+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62457
62458 /*
62459 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
62460diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
62461index 83ac071..2656e0e 100644
62462--- a/include/linux/mod_devicetable.h
62463+++ b/include/linux/mod_devicetable.h
62464@@ -12,7 +12,7 @@
62465 typedef unsigned long kernel_ulong_t;
62466 #endif
62467
62468-#define PCI_ANY_ID (~0)
62469+#define PCI_ANY_ID ((__u16)~0)
62470
62471 struct pci_device_id {
62472 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
62473@@ -131,7 +131,7 @@ struct usb_device_id {
62474 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
62475 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
62476
62477-#define HID_ANY_ID (~0)
62478+#define HID_ANY_ID (~0U)
62479
62480 struct hid_device_id {
62481 __u16 bus;
62482diff --git a/include/linux/module.h b/include/linux/module.h
62483index 4598bf0..e069d7f 100644
62484--- a/include/linux/module.h
62485+++ b/include/linux/module.h
62486@@ -17,6 +17,7 @@
62487 #include <linux/moduleparam.h>
62488 #include <linux/tracepoint.h>
62489 #include <linux/export.h>
62490+#include <linux/fs.h>
62491
62492 #include <linux/percpu.h>
62493 #include <asm/module.h>
62494@@ -275,19 +276,16 @@ struct module
62495 int (*init)(void);
62496
62497 /* If this is non-NULL, vfree after init() returns */
62498- void *module_init;
62499+ void *module_init_rx, *module_init_rw;
62500
62501 /* Here is the actual code + data, vfree'd on unload. */
62502- void *module_core;
62503+ void *module_core_rx, *module_core_rw;
62504
62505 /* Here are the sizes of the init and core sections */
62506- unsigned int init_size, core_size;
62507+ unsigned int init_size_rw, core_size_rw;
62508
62509 /* The size of the executable code in each section. */
62510- unsigned int init_text_size, core_text_size;
62511-
62512- /* Size of RO sections of the module (text+rodata) */
62513- unsigned int init_ro_size, core_ro_size;
62514+ unsigned int init_size_rx, core_size_rx;
62515
62516 /* Arch-specific module values */
62517 struct mod_arch_specific arch;
62518@@ -343,6 +341,10 @@ struct module
62519 #ifdef CONFIG_EVENT_TRACING
62520 struct ftrace_event_call **trace_events;
62521 unsigned int num_trace_events;
62522+ struct file_operations trace_id;
62523+ struct file_operations trace_enable;
62524+ struct file_operations trace_format;
62525+ struct file_operations trace_filter;
62526 #endif
62527 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
62528 unsigned int num_ftrace_callsites;
62529@@ -390,16 +392,46 @@ bool is_module_address(unsigned long addr);
62530 bool is_module_percpu_address(unsigned long addr);
62531 bool is_module_text_address(unsigned long addr);
62532
62533+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
62534+{
62535+
62536+#ifdef CONFIG_PAX_KERNEXEC
62537+ if (ktla_ktva(addr) >= (unsigned long)start &&
62538+ ktla_ktva(addr) < (unsigned long)start + size)
62539+ return 1;
62540+#endif
62541+
62542+ return ((void *)addr >= start && (void *)addr < start + size);
62543+}
62544+
62545+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
62546+{
62547+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
62548+}
62549+
62550+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
62551+{
62552+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
62553+}
62554+
62555+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
62556+{
62557+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
62558+}
62559+
62560+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
62561+{
62562+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
62563+}
62564+
62565 static inline int within_module_core(unsigned long addr, struct module *mod)
62566 {
62567- return (unsigned long)mod->module_core <= addr &&
62568- addr < (unsigned long)mod->module_core + mod->core_size;
62569+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
62570 }
62571
62572 static inline int within_module_init(unsigned long addr, struct module *mod)
62573 {
62574- return (unsigned long)mod->module_init <= addr &&
62575- addr < (unsigned long)mod->module_init + mod->init_size;
62576+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
62577 }
62578
62579 /* Search for module by name: must hold module_mutex. */
62580diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
62581index b2be02e..72d2f78 100644
62582--- a/include/linux/moduleloader.h
62583+++ b/include/linux/moduleloader.h
62584@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
62585
62586 /* Allocator used for allocating struct module, core sections and init
62587 sections. Returns NULL on failure. */
62588-void *module_alloc(unsigned long size);
62589+void *module_alloc(unsigned long size) __size_overflow(1);
62590+
62591+#ifdef CONFIG_PAX_KERNEXEC
62592+void *module_alloc_exec(unsigned long size) __size_overflow(1);
62593+#else
62594+#define module_alloc_exec(x) module_alloc(x)
62595+#endif
62596
62597 /* Free memory returned from module_alloc. */
62598 void module_free(struct module *mod, void *module_region);
62599
62600+#ifdef CONFIG_PAX_KERNEXEC
62601+void module_free_exec(struct module *mod, void *module_region);
62602+#else
62603+#define module_free_exec(x, y) module_free((x), (y))
62604+#endif
62605+
62606 /* Apply the given relocation to the (simplified) ELF. Return -error
62607 or 0. */
62608 int apply_relocate(Elf_Shdr *sechdrs,
62609diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
62610index c47f4d6..23f9bdb 100644
62611--- a/include/linux/moduleparam.h
62612+++ b/include/linux/moduleparam.h
62613@@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
62614 * @len is usually just sizeof(string).
62615 */
62616 #define module_param_string(name, string, len, perm) \
62617- static const struct kparam_string __param_string_##name \
62618+ static const struct kparam_string __param_string_##name __used \
62619 = { len, string }; \
62620 __module_param_call(MODULE_PARAM_PREFIX, name, \
62621 &param_ops_string, \
62622@@ -396,7 +396,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
62623 */
62624 #define module_param_array_named(name, array, type, nump, perm) \
62625 param_check_##type(name, &(array)[0]); \
62626- static const struct kparam_array __param_arr_##name \
62627+ static const struct kparam_array __param_arr_##name __used \
62628 = { .max = ARRAY_SIZE(array), .num = nump, \
62629 .ops = &param_ops_##type, \
62630 .elemsize = sizeof(array[0]), .elem = array }; \
62631diff --git a/include/linux/namei.h b/include/linux/namei.h
62632index ffc0213..2c1f2cb 100644
62633--- a/include/linux/namei.h
62634+++ b/include/linux/namei.h
62635@@ -24,7 +24,7 @@ struct nameidata {
62636 unsigned seq;
62637 int last_type;
62638 unsigned depth;
62639- char *saved_names[MAX_NESTED_LINKS + 1];
62640+ const char *saved_names[MAX_NESTED_LINKS + 1];
62641
62642 /* Intent data */
62643 union {
62644@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
62645 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
62646 extern void unlock_rename(struct dentry *, struct dentry *);
62647
62648-static inline void nd_set_link(struct nameidata *nd, char *path)
62649+static inline void nd_set_link(struct nameidata *nd, const char *path)
62650 {
62651 nd->saved_names[nd->depth] = path;
62652 }
62653
62654-static inline char *nd_get_link(struct nameidata *nd)
62655+static inline const char *nd_get_link(const struct nameidata *nd)
62656 {
62657 return nd->saved_names[nd->depth];
62658 }
62659diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
62660index 0eac07c..a59f6a8 100644
62661--- a/include/linux/netdevice.h
62662+++ b/include/linux/netdevice.h
62663@@ -1002,6 +1002,7 @@ struct net_device_ops {
62664 int (*ndo_neigh_construct)(struct neighbour *n);
62665 void (*ndo_neigh_destroy)(struct neighbour *n);
62666 };
62667+typedef struct net_device_ops __no_const net_device_ops_no_const;
62668
62669 /*
62670 * The DEVICE structure.
62671@@ -1063,7 +1064,7 @@ struct net_device {
62672 int iflink;
62673
62674 struct net_device_stats stats;
62675- atomic_long_t rx_dropped; /* dropped packets by core network
62676+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
62677 * Do not use this in drivers.
62678 */
62679
62680diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
62681new file mode 100644
62682index 0000000..33f4af8
62683--- /dev/null
62684+++ b/include/linux/netfilter/xt_gradm.h
62685@@ -0,0 +1,9 @@
62686+#ifndef _LINUX_NETFILTER_XT_GRADM_H
62687+#define _LINUX_NETFILTER_XT_GRADM_H 1
62688+
62689+struct xt_gradm_mtinfo {
62690+ __u16 flags;
62691+ __u16 invflags;
62692+};
62693+
62694+#endif
62695diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62696index c65a18a..0c05f3a 100644
62697--- a/include/linux/of_pdt.h
62698+++ b/include/linux/of_pdt.h
62699@@ -32,7 +32,7 @@ struct of_pdt_ops {
62700
62701 /* return 0 on success; fill in 'len' with number of bytes in path */
62702 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62703-};
62704+} __no_const;
62705
62706 extern void *prom_early_alloc(unsigned long size);
62707
62708diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62709index a4c5624..2dabfb7 100644
62710--- a/include/linux/oprofile.h
62711+++ b/include/linux/oprofile.h
62712@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
62713 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62714 char const * name, ulong * val);
62715
62716-/** Create a file for read-only access to an atomic_t. */
62717+/** Create a file for read-only access to an atomic_unchecked_t. */
62718 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62719- char const * name, atomic_t * val);
62720+ char const * name, atomic_unchecked_t * val);
62721
62722 /** create a directory */
62723 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
62724@@ -163,7 +163,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
62725 * Read an ASCII string for a number from a userspace buffer and fill *val on success.
62726 * Returns 0 on success, < 0 on error.
62727 */
62728-int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
62729+int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count) __size_overflow(3);
62730
62731 /** lock for read/write safety */
62732 extern raw_spinlock_t oprofilefs_lock;
62733diff --git a/include/linux/padata.h b/include/linux/padata.h
62734index 4633b2f..988bc08 100644
62735--- a/include/linux/padata.h
62736+++ b/include/linux/padata.h
62737@@ -129,7 +129,7 @@ struct parallel_data {
62738 struct padata_instance *pinst;
62739 struct padata_parallel_queue __percpu *pqueue;
62740 struct padata_serial_queue __percpu *squeue;
62741- atomic_t seq_nr;
62742+ atomic_unchecked_t seq_nr;
62743 atomic_t reorder_objects;
62744 atomic_t refcnt;
62745 unsigned int max_seq_nr;
62746diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62747index abb2776..d8b8e15 100644
62748--- a/include/linux/perf_event.h
62749+++ b/include/linux/perf_event.h
62750@@ -750,8 +750,8 @@ struct perf_event {
62751
62752 enum perf_event_active_state state;
62753 unsigned int attach_state;
62754- local64_t count;
62755- atomic64_t child_count;
62756+ local64_t count; /* PaX: fix it one day */
62757+ atomic64_unchecked_t child_count;
62758
62759 /*
62760 * These are the total time in nanoseconds that the event
62761@@ -802,8 +802,8 @@ struct perf_event {
62762 * These accumulate total time (in nanoseconds) that children
62763 * events have been enabled and running, respectively.
62764 */
62765- atomic64_t child_total_time_enabled;
62766- atomic64_t child_total_time_running;
62767+ atomic64_unchecked_t child_total_time_enabled;
62768+ atomic64_unchecked_t child_total_time_running;
62769
62770 /*
62771 * Protect attach/detach and child_list:
62772diff --git a/include/linux/personality.h b/include/linux/personality.h
62773index 8fc7dd1a..c19d89e 100644
62774--- a/include/linux/personality.h
62775+++ b/include/linux/personality.h
62776@@ -44,6 +44,7 @@ enum {
62777 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
62778 ADDR_NO_RANDOMIZE | \
62779 ADDR_COMPAT_LAYOUT | \
62780+ ADDR_LIMIT_3GB | \
62781 MMAP_PAGE_ZERO)
62782
62783 /*
62784diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62785index 77257c9..51d473a 100644
62786--- a/include/linux/pipe_fs_i.h
62787+++ b/include/linux/pipe_fs_i.h
62788@@ -46,9 +46,9 @@ struct pipe_buffer {
62789 struct pipe_inode_info {
62790 wait_queue_head_t wait;
62791 unsigned int nrbufs, curbuf, buffers;
62792- unsigned int readers;
62793- unsigned int writers;
62794- unsigned int waiting_writers;
62795+ atomic_t readers;
62796+ atomic_t writers;
62797+ atomic_t waiting_writers;
62798 unsigned int r_counter;
62799 unsigned int w_counter;
62800 struct page *tmp_page;
62801diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62802index 609daae..5392427 100644
62803--- a/include/linux/pm_runtime.h
62804+++ b/include/linux/pm_runtime.h
62805@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
62806
62807 static inline void pm_runtime_mark_last_busy(struct device *dev)
62808 {
62809- ACCESS_ONCE(dev->power.last_busy) = jiffies;
62810+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62811 }
62812
62813 #else /* !CONFIG_PM_RUNTIME */
62814diff --git a/include/linux/poison.h b/include/linux/poison.h
62815index 2110a81..13a11bb 100644
62816--- a/include/linux/poison.h
62817+++ b/include/linux/poison.h
62818@@ -19,8 +19,8 @@
62819 * under normal circumstances, used to verify that nobody uses
62820 * non-initialized list entries.
62821 */
62822-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62823-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62824+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62825+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
62826
62827 /********** include/linux/timer.h **********/
62828 /*
62829diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62830index 58969b2..ead129b 100644
62831--- a/include/linux/preempt.h
62832+++ b/include/linux/preempt.h
62833@@ -123,7 +123,7 @@ struct preempt_ops {
62834 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62835 void (*sched_out)(struct preempt_notifier *notifier,
62836 struct task_struct *next);
62837-};
62838+} __no_const;
62839
62840 /**
62841 * preempt_notifier - key for installing preemption notifiers
62842diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62843index 85c5073..51fac8b 100644
62844--- a/include/linux/proc_fs.h
62845+++ b/include/linux/proc_fs.h
62846@@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
62847 return proc_create_data(name, mode, parent, proc_fops, NULL);
62848 }
62849
62850+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
62851+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62852+{
62853+#ifdef CONFIG_GRKERNSEC_PROC_USER
62854+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62855+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62856+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62857+#else
62858+ return proc_create_data(name, mode, parent, proc_fops, NULL);
62859+#endif
62860+}
62861+
62862 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62863 umode_t mode, struct proc_dir_entry *base,
62864 read_proc_t *read_proc, void * data)
62865@@ -258,7 +270,7 @@ union proc_op {
62866 int (*proc_show)(struct seq_file *m,
62867 struct pid_namespace *ns, struct pid *pid,
62868 struct task_struct *task);
62869-};
62870+} __no_const;
62871
62872 struct ctl_table_header;
62873 struct ctl_table;
62874diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
62875index c2f1f6a..6fdb196 100644
62876--- a/include/linux/ptrace.h
62877+++ b/include/linux/ptrace.h
62878@@ -199,9 +199,10 @@ static inline void ptrace_event(int event, unsigned long message)
62879 if (unlikely(ptrace_event_enabled(current, event))) {
62880 current->ptrace_message = message;
62881 ptrace_notify((event << 8) | SIGTRAP);
62882- } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
62883+ } else if (event == PTRACE_EVENT_EXEC) {
62884 /* legacy EXEC report via SIGTRAP */
62885- send_sig(SIGTRAP, current, 0);
62886+ if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
62887+ send_sig(SIGTRAP, current, 0);
62888 }
62889 }
62890
62891diff --git a/include/linux/random.h b/include/linux/random.h
62892index 8f74538..02a1012 100644
62893--- a/include/linux/random.h
62894+++ b/include/linux/random.h
62895@@ -69,12 +69,17 @@ void srandom32(u32 seed);
62896
62897 u32 prandom32(struct rnd_state *);
62898
62899+static inline unsigned long pax_get_random_long(void)
62900+{
62901+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62902+}
62903+
62904 /*
62905 * Handle minimum values for seeds
62906 */
62907 static inline u32 __seed(u32 x, u32 m)
62908 {
62909- return (x < m) ? x + m : x;
62910+ return (x <= m) ? x + m + 1 : x;
62911 }
62912
62913 /**
62914diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62915index e0879a7..a12f962 100644
62916--- a/include/linux/reboot.h
62917+++ b/include/linux/reboot.h
62918@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62919 * Architecture-specific implementations of sys_reboot commands.
62920 */
62921
62922-extern void machine_restart(char *cmd);
62923-extern void machine_halt(void);
62924-extern void machine_power_off(void);
62925+extern void machine_restart(char *cmd) __noreturn;
62926+extern void machine_halt(void) __noreturn;
62927+extern void machine_power_off(void) __noreturn;
62928
62929 extern void machine_shutdown(void);
62930 struct pt_regs;
62931@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
62932 */
62933
62934 extern void kernel_restart_prepare(char *cmd);
62935-extern void kernel_restart(char *cmd);
62936-extern void kernel_halt(void);
62937-extern void kernel_power_off(void);
62938+extern void kernel_restart(char *cmd) __noreturn;
62939+extern void kernel_halt(void) __noreturn;
62940+extern void kernel_power_off(void) __noreturn;
62941
62942 extern int C_A_D; /* for sysctl */
62943 void ctrl_alt_del(void);
62944@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
62945 * Emergency restart, callable from an interrupt handler.
62946 */
62947
62948-extern void emergency_restart(void);
62949+extern void emergency_restart(void) __noreturn;
62950 #include <asm/emergency-restart.h>
62951
62952 #endif
62953diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
62954index 2213ddc..650212a 100644
62955--- a/include/linux/reiserfs_fs.h
62956+++ b/include/linux/reiserfs_fs.h
62957@@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
62958 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
62959
62960 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
62961-#define get_generation(s) atomic_read (&fs_generation(s))
62962+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
62963 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
62964 #define __fs_changed(gen,s) (gen != get_generation (s))
62965 #define fs_changed(gen,s) \
62966diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
62967index 8c9e85c..1698e9a 100644
62968--- a/include/linux/reiserfs_fs_sb.h
62969+++ b/include/linux/reiserfs_fs_sb.h
62970@@ -386,7 +386,7 @@ struct reiserfs_sb_info {
62971 /* Comment? -Hans */
62972 wait_queue_head_t s_wait;
62973 /* To be obsoleted soon by per buffer seals.. -Hans */
62974- atomic_t s_generation_counter; // increased by one every time the
62975+ atomic_unchecked_t s_generation_counter; // increased by one every time the
62976 // tree gets re-balanced
62977 unsigned long s_properties; /* File system properties. Currently holds
62978 on-disk FS format */
62979diff --git a/include/linux/relay.h b/include/linux/relay.h
62980index a822fd7..62b70f6 100644
62981--- a/include/linux/relay.h
62982+++ b/include/linux/relay.h
62983@@ -159,7 +159,7 @@ struct rchan_callbacks
62984 * The callback should return 0 if successful, negative if not.
62985 */
62986 int (*remove_buf_file)(struct dentry *dentry);
62987-};
62988+} __no_const;
62989
62990 /*
62991 * CONFIG_RELAY kernel API, kernel/relay.c
62992diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62993index c6c6084..5bf1212 100644
62994--- a/include/linux/rfkill.h
62995+++ b/include/linux/rfkill.h
62996@@ -147,6 +147,7 @@ struct rfkill_ops {
62997 void (*query)(struct rfkill *rfkill, void *data);
62998 int (*set_block)(void *data, bool blocked);
62999 };
63000+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
63001
63002 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
63003 /**
63004diff --git a/include/linux/rio.h b/include/linux/rio.h
63005index 4d50611..c6858a2 100644
63006--- a/include/linux/rio.h
63007+++ b/include/linux/rio.h
63008@@ -315,7 +315,7 @@ struct rio_ops {
63009 int mbox, void *buffer, size_t len);
63010 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
63011 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
63012-};
63013+} __no_const;
63014
63015 #define RIO_RESOURCE_MEM 0x00000100
63016 #define RIO_RESOURCE_DOORBELL 0x00000200
63017diff --git a/include/linux/rmap.h b/include/linux/rmap.h
63018index 1cdd62a..e399f0d 100644
63019--- a/include/linux/rmap.h
63020+++ b/include/linux/rmap.h
63021@@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
63022 void anon_vma_init(void); /* create anon_vma_cachep */
63023 int anon_vma_prepare(struct vm_area_struct *);
63024 void unlink_anon_vmas(struct vm_area_struct *);
63025-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
63026+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
63027 void anon_vma_moveto_tail(struct vm_area_struct *);
63028-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
63029+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
63030 void __anon_vma_link(struct vm_area_struct *);
63031
63032 static inline void anon_vma_merge(struct vm_area_struct *vma,
63033diff --git a/include/linux/sched.h b/include/linux/sched.h
63034index 0657368..765f70f 100644
63035--- a/include/linux/sched.h
63036+++ b/include/linux/sched.h
63037@@ -101,6 +101,7 @@ struct bio_list;
63038 struct fs_struct;
63039 struct perf_event_context;
63040 struct blk_plug;
63041+struct linux_binprm;
63042
63043 /*
63044 * List of flags we want to share for kernel threads,
63045@@ -382,10 +383,13 @@ struct user_namespace;
63046 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
63047
63048 extern int sysctl_max_map_count;
63049+extern unsigned long sysctl_heap_stack_gap;
63050
63051 #include <linux/aio.h>
63052
63053 #ifdef CONFIG_MMU
63054+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
63055+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
63056 extern void arch_pick_mmap_layout(struct mm_struct *mm);
63057 extern unsigned long
63058 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
63059@@ -631,6 +635,17 @@ struct signal_struct {
63060 #ifdef CONFIG_TASKSTATS
63061 struct taskstats *stats;
63062 #endif
63063+
63064+#ifdef CONFIG_GRKERNSEC
63065+ u32 curr_ip;
63066+ u32 saved_ip;
63067+ u32 gr_saddr;
63068+ u32 gr_daddr;
63069+ u16 gr_sport;
63070+ u16 gr_dport;
63071+ u8 used_accept:1;
63072+#endif
63073+
63074 #ifdef CONFIG_AUDIT
63075 unsigned audit_tty;
63076 struct tty_audit_buf *tty_audit_buf;
63077@@ -714,6 +729,11 @@ struct user_struct {
63078 struct key *session_keyring; /* UID's default session keyring */
63079 #endif
63080
63081+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63082+ unsigned int banned;
63083+ unsigned long ban_expires;
63084+#endif
63085+
63086 /* Hash table maintenance information */
63087 struct hlist_node uidhash_node;
63088 uid_t uid;
63089@@ -1354,8 +1374,8 @@ struct task_struct {
63090 struct list_head thread_group;
63091
63092 struct completion *vfork_done; /* for vfork() */
63093- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
63094- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63095+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
63096+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63097
63098 cputime_t utime, stime, utimescaled, stimescaled;
63099 cputime_t gtime;
63100@@ -1371,13 +1391,6 @@ struct task_struct {
63101 struct task_cputime cputime_expires;
63102 struct list_head cpu_timers[3];
63103
63104-/* process credentials */
63105- const struct cred __rcu *real_cred; /* objective and real subjective task
63106- * credentials (COW) */
63107- const struct cred __rcu *cred; /* effective (overridable) subjective task
63108- * credentials (COW) */
63109- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63110-
63111 char comm[TASK_COMM_LEN]; /* executable name excluding path
63112 - access with [gs]et_task_comm (which lock
63113 it with task_lock())
63114@@ -1394,8 +1407,16 @@ struct task_struct {
63115 #endif
63116 /* CPU-specific state of this task */
63117 struct thread_struct thread;
63118+/* thread_info moved to task_struct */
63119+#ifdef CONFIG_X86
63120+ struct thread_info tinfo;
63121+#endif
63122 /* filesystem information */
63123 struct fs_struct *fs;
63124+
63125+ const struct cred __rcu *cred; /* effective (overridable) subjective task
63126+ * credentials (COW) */
63127+
63128 /* open file information */
63129 struct files_struct *files;
63130 /* namespaces */
63131@@ -1442,6 +1463,11 @@ struct task_struct {
63132 struct rt_mutex_waiter *pi_blocked_on;
63133 #endif
63134
63135+/* process credentials */
63136+ const struct cred __rcu *real_cred; /* objective and real subjective task
63137+ * credentials (COW) */
63138+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63139+
63140 #ifdef CONFIG_DEBUG_MUTEXES
63141 /* mutex deadlock detection */
63142 struct mutex_waiter *blocked_on;
63143@@ -1558,6 +1584,27 @@ struct task_struct {
63144 unsigned long default_timer_slack_ns;
63145
63146 struct list_head *scm_work_list;
63147+
63148+#ifdef CONFIG_GRKERNSEC
63149+ /* grsecurity */
63150+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63151+ u64 exec_id;
63152+#endif
63153+#ifdef CONFIG_GRKERNSEC_SETXID
63154+ const struct cred *delayed_cred;
63155+#endif
63156+ struct dentry *gr_chroot_dentry;
63157+ struct acl_subject_label *acl;
63158+ struct acl_role_label *role;
63159+ struct file *exec_file;
63160+ u16 acl_role_id;
63161+ /* is this the task that authenticated to the special role */
63162+ u8 acl_sp_role;
63163+ u8 is_writable;
63164+ u8 brute;
63165+ u8 gr_is_chrooted;
63166+#endif
63167+
63168 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
63169 /* Index of current stored address in ret_stack */
63170 int curr_ret_stack;
63171@@ -1592,6 +1639,51 @@ struct task_struct {
63172 #endif
63173 };
63174
63175+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
63176+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
63177+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
63178+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
63179+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
63180+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
63181+
63182+#ifdef CONFIG_PAX_SOFTMODE
63183+extern int pax_softmode;
63184+#endif
63185+
63186+extern int pax_check_flags(unsigned long *);
63187+
63188+/* if tsk != current then task_lock must be held on it */
63189+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63190+static inline unsigned long pax_get_flags(struct task_struct *tsk)
63191+{
63192+ if (likely(tsk->mm))
63193+ return tsk->mm->pax_flags;
63194+ else
63195+ return 0UL;
63196+}
63197+
63198+/* if tsk != current then task_lock must be held on it */
63199+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
63200+{
63201+ if (likely(tsk->mm)) {
63202+ tsk->mm->pax_flags = flags;
63203+ return 0;
63204+ }
63205+ return -EINVAL;
63206+}
63207+#endif
63208+
63209+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63210+extern void pax_set_initial_flags(struct linux_binprm *bprm);
63211+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
63212+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
63213+#endif
63214+
63215+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
63216+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
63217+extern void pax_report_refcount_overflow(struct pt_regs *regs);
63218+extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
63219+
63220 /* Future-safe accessor for struct task_struct's cpus_allowed. */
63221 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
63222
63223@@ -2104,7 +2196,9 @@ void yield(void);
63224 extern struct exec_domain default_exec_domain;
63225
63226 union thread_union {
63227+#ifndef CONFIG_X86
63228 struct thread_info thread_info;
63229+#endif
63230 unsigned long stack[THREAD_SIZE/sizeof(long)];
63231 };
63232
63233@@ -2137,6 +2231,7 @@ extern struct pid_namespace init_pid_ns;
63234 */
63235
63236 extern struct task_struct *find_task_by_vpid(pid_t nr);
63237+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
63238 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
63239 struct pid_namespace *ns);
63240
63241@@ -2280,7 +2375,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
63242 extern void exit_itimers(struct signal_struct *);
63243 extern void flush_itimer_signals(void);
63244
63245-extern void do_group_exit(int);
63246+extern __noreturn void do_group_exit(int);
63247
63248 extern void daemonize(const char *, ...);
63249 extern int allow_signal(int);
63250@@ -2478,13 +2573,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
63251
63252 #endif
63253
63254-static inline int object_is_on_stack(void *obj)
63255+static inline int object_starts_on_stack(void *obj)
63256 {
63257- void *stack = task_stack_page(current);
63258+ const void *stack = task_stack_page(current);
63259
63260 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
63261 }
63262
63263+#ifdef CONFIG_PAX_USERCOPY
63264+extern int object_is_on_stack(const void *obj, unsigned long len);
63265+#endif
63266+
63267 extern void thread_info_cache_init(void);
63268
63269 #ifdef CONFIG_DEBUG_STACK_USAGE
63270diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
63271index 899fbb4..1cb4138 100644
63272--- a/include/linux/screen_info.h
63273+++ b/include/linux/screen_info.h
63274@@ -43,7 +43,8 @@ struct screen_info {
63275 __u16 pages; /* 0x32 */
63276 __u16 vesa_attributes; /* 0x34 */
63277 __u32 capabilities; /* 0x36 */
63278- __u8 _reserved[6]; /* 0x3a */
63279+ __u16 vesapm_size; /* 0x3a */
63280+ __u8 _reserved[4]; /* 0x3c */
63281 } __attribute__((packed));
63282
63283 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
63284diff --git a/include/linux/security.h b/include/linux/security.h
63285index 83c18e8..2d98860 100644
63286--- a/include/linux/security.h
63287+++ b/include/linux/security.h
63288@@ -37,6 +37,7 @@
63289 #include <linux/xfrm.h>
63290 #include <linux/slab.h>
63291 #include <linux/xattr.h>
63292+#include <linux/grsecurity.h>
63293 #include <net/flow.h>
63294
63295 /* Maximum number of letters for an LSM name string */
63296diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
63297index 44f1514..2bbf6c1 100644
63298--- a/include/linux/seq_file.h
63299+++ b/include/linux/seq_file.h
63300@@ -24,6 +24,9 @@ struct seq_file {
63301 struct mutex lock;
63302 const struct seq_operations *op;
63303 int poll_event;
63304+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63305+ u64 exec_id;
63306+#endif
63307 void *private;
63308 };
63309
63310@@ -33,6 +36,7 @@ struct seq_operations {
63311 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
63312 int (*show) (struct seq_file *m, void *v);
63313 };
63314+typedef struct seq_operations __no_const seq_operations_no_const;
63315
63316 #define SEQ_SKIP 1
63317
63318diff --git a/include/linux/shm.h b/include/linux/shm.h
63319index 92808b8..c28cac4 100644
63320--- a/include/linux/shm.h
63321+++ b/include/linux/shm.h
63322@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
63323
63324 /* The task created the shm object. NULL if the task is dead. */
63325 struct task_struct *shm_creator;
63326+#ifdef CONFIG_GRKERNSEC
63327+ time_t shm_createtime;
63328+ pid_t shm_lapid;
63329+#endif
63330 };
63331
63332 /* shm_mode upper byte flags */
63333diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
63334index ae86ade..2b51468 100644
63335--- a/include/linux/skbuff.h
63336+++ b/include/linux/skbuff.h
63337@@ -654,7 +654,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
63338 */
63339 static inline int skb_queue_empty(const struct sk_buff_head *list)
63340 {
63341- return list->next == (struct sk_buff *)list;
63342+ return list->next == (const struct sk_buff *)list;
63343 }
63344
63345 /**
63346@@ -667,7 +667,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
63347 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63348 const struct sk_buff *skb)
63349 {
63350- return skb->next == (struct sk_buff *)list;
63351+ return skb->next == (const struct sk_buff *)list;
63352 }
63353
63354 /**
63355@@ -680,7 +680,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63356 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
63357 const struct sk_buff *skb)
63358 {
63359- return skb->prev == (struct sk_buff *)list;
63360+ return skb->prev == (const struct sk_buff *)list;
63361 }
63362
63363 /**
63364@@ -1545,7 +1545,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
63365 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
63366 */
63367 #ifndef NET_SKB_PAD
63368-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
63369+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
63370 #endif
63371
63372 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
63373diff --git a/include/linux/slab.h b/include/linux/slab.h
63374index 573c809..07e1f43 100644
63375--- a/include/linux/slab.h
63376+++ b/include/linux/slab.h
63377@@ -11,12 +11,20 @@
63378
63379 #include <linux/gfp.h>
63380 #include <linux/types.h>
63381+#include <linux/err.h>
63382
63383 /*
63384 * Flags to pass to kmem_cache_create().
63385 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
63386 */
63387 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
63388+
63389+#ifdef CONFIG_PAX_USERCOPY
63390+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
63391+#else
63392+#define SLAB_USERCOPY 0x00000000UL
63393+#endif
63394+
63395 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
63396 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
63397 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
63398@@ -87,10 +95,13 @@
63399 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
63400 * Both make kfree a no-op.
63401 */
63402-#define ZERO_SIZE_PTR ((void *)16)
63403+#define ZERO_SIZE_PTR \
63404+({ \
63405+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
63406+ (void *)(-MAX_ERRNO-1L); \
63407+})
63408
63409-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
63410- (unsigned long)ZERO_SIZE_PTR)
63411+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
63412
63413 /*
63414 * struct kmem_cache related prototypes
63415@@ -156,11 +167,12 @@ unsigned int kmem_cache_size(struct kmem_cache *);
63416 /*
63417 * Common kmalloc functions provided by all allocators
63418 */
63419-void * __must_check __krealloc(const void *, size_t, gfp_t);
63420-void * __must_check krealloc(const void *, size_t, gfp_t);
63421+void * __must_check __krealloc(const void *, size_t, gfp_t) __size_overflow(2);
63422+void * __must_check krealloc(const void *, size_t, gfp_t) __size_overflow(2);
63423 void kfree(const void *);
63424 void kzfree(const void *);
63425 size_t ksize(const void *);
63426+void check_object_size(const void *ptr, unsigned long n, bool to);
63427
63428 /*
63429 * Allocator specific definitions. These are mainly used to establish optimized
63430@@ -287,7 +299,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
63431 */
63432 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63433 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63434-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63435+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
63436 #define kmalloc_track_caller(size, flags) \
63437 __kmalloc_track_caller(size, flags, _RET_IP_)
63438 #else
63439@@ -306,7 +318,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63440 */
63441 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63442 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63443-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
63444+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
63445 #define kmalloc_node_track_caller(size, flags, node) \
63446 __kmalloc_node_track_caller(size, flags, node, \
63447 _RET_IP_)
63448diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
63449index fbd1117..c0bd874 100644
63450--- a/include/linux/slab_def.h
63451+++ b/include/linux/slab_def.h
63452@@ -66,10 +66,10 @@ struct kmem_cache {
63453 unsigned long node_allocs;
63454 unsigned long node_frees;
63455 unsigned long node_overflow;
63456- atomic_t allochit;
63457- atomic_t allocmiss;
63458- atomic_t freehit;
63459- atomic_t freemiss;
63460+ atomic_unchecked_t allochit;
63461+ atomic_unchecked_t allocmiss;
63462+ atomic_unchecked_t freehit;
63463+ atomic_unchecked_t freemiss;
63464
63465 /*
63466 * If debugging is enabled, then the allocator can add additional
63467@@ -107,7 +107,7 @@ struct cache_sizes {
63468 extern struct cache_sizes malloc_sizes[];
63469
63470 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63471-void *__kmalloc(size_t size, gfp_t flags);
63472+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63473
63474 #ifdef CONFIG_TRACING
63475 extern void *kmem_cache_alloc_trace(size_t size,
63476@@ -125,6 +125,7 @@ static inline size_t slab_buffer_size(struct kmem_cache *cachep)
63477 }
63478 #endif
63479
63480+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63481 static __always_inline void *kmalloc(size_t size, gfp_t flags)
63482 {
63483 struct kmem_cache *cachep;
63484@@ -160,7 +161,7 @@ found:
63485 }
63486
63487 #ifdef CONFIG_NUMA
63488-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
63489+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63490 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
63491
63492 #ifdef CONFIG_TRACING
63493@@ -179,6 +180,7 @@ kmem_cache_alloc_node_trace(size_t size,
63494 }
63495 #endif
63496
63497+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63498 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63499 {
63500 struct kmem_cache *cachep;
63501diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
63502index 0ec00b3..65e7e0e 100644
63503--- a/include/linux/slob_def.h
63504+++ b/include/linux/slob_def.h
63505@@ -9,8 +9,9 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
63506 return kmem_cache_alloc_node(cachep, flags, -1);
63507 }
63508
63509-void *__kmalloc_node(size_t size, gfp_t flags, int node);
63510+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63511
63512+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63513 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63514 {
63515 return __kmalloc_node(size, flags, node);
63516@@ -24,11 +25,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63517 * kmalloc is the normal method of allocating memory
63518 * in the kernel.
63519 */
63520+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63521 static __always_inline void *kmalloc(size_t size, gfp_t flags)
63522 {
63523 return __kmalloc_node(size, flags, -1);
63524 }
63525
63526+static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63527 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
63528 {
63529 return kmalloc(size, flags);
63530diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
63531index a32bcfd..d26bd6e 100644
63532--- a/include/linux/slub_def.h
63533+++ b/include/linux/slub_def.h
63534@@ -89,7 +89,7 @@ struct kmem_cache {
63535 struct kmem_cache_order_objects max;
63536 struct kmem_cache_order_objects min;
63537 gfp_t allocflags; /* gfp flags to use on each alloc */
63538- int refcount; /* Refcount for slab cache destroy */
63539+ atomic_t refcount; /* Refcount for slab cache destroy */
63540 void (*ctor)(void *);
63541 int inuse; /* Offset to metadata */
63542 int align; /* Alignment */
63543@@ -204,6 +204,7 @@ static __always_inline int kmalloc_index(size_t size)
63544 * This ought to end up with a global pointer to the right cache
63545 * in kmalloc_caches.
63546 */
63547+static __always_inline struct kmem_cache *kmalloc_slab(size_t size) __size_overflow(1);
63548 static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
63549 {
63550 int index = kmalloc_index(size);
63551@@ -215,9 +216,11 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
63552 }
63553
63554 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63555-void *__kmalloc(size_t size, gfp_t flags);
63556+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
63557
63558 static __always_inline void *
63559+kmalloc_order(size_t size, gfp_t flags, unsigned int order) __size_overflow(1);
63560+static __always_inline void *
63561 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
63562 {
63563 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
63564@@ -256,12 +259,14 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
63565 }
63566 #endif
63567
63568+static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
63569 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
63570 {
63571 unsigned int order = get_order(size);
63572 return kmalloc_order_trace(size, flags, order);
63573 }
63574
63575+static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63576 static __always_inline void *kmalloc(size_t size, gfp_t flags)
63577 {
63578 if (__builtin_constant_p(size)) {
63579@@ -281,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
63580 }
63581
63582 #ifdef CONFIG_NUMA
63583-void *__kmalloc_node(size_t size, gfp_t flags, int node);
63584+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63585 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
63586
63587 #ifdef CONFIG_TRACING
63588@@ -298,6 +303,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
63589 }
63590 #endif
63591
63592+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63593 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63594 {
63595 if (__builtin_constant_p(size) &&
63596diff --git a/include/linux/sonet.h b/include/linux/sonet.h
63597index de8832d..0147b46 100644
63598--- a/include/linux/sonet.h
63599+++ b/include/linux/sonet.h
63600@@ -61,7 +61,7 @@ struct sonet_stats {
63601 #include <linux/atomic.h>
63602
63603 struct k_sonet_stats {
63604-#define __HANDLE_ITEM(i) atomic_t i
63605+#define __HANDLE_ITEM(i) atomic_unchecked_t i
63606 __SONET_ITEMS
63607 #undef __HANDLE_ITEM
63608 };
63609diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
63610index 2c5993a..b0e79f0 100644
63611--- a/include/linux/sunrpc/clnt.h
63612+++ b/include/linux/sunrpc/clnt.h
63613@@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
63614 {
63615 switch (sap->sa_family) {
63616 case AF_INET:
63617- return ntohs(((struct sockaddr_in *)sap)->sin_port);
63618+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
63619 case AF_INET6:
63620- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
63621+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
63622 }
63623 return 0;
63624 }
63625@@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
63626 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
63627 const struct sockaddr *src)
63628 {
63629- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
63630+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
63631 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
63632
63633 dsin->sin_family = ssin->sin_family;
63634@@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
63635 if (sa->sa_family != AF_INET6)
63636 return 0;
63637
63638- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
63639+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
63640 }
63641
63642 #endif /* __KERNEL__ */
63643diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
63644index e775689..9e206d9 100644
63645--- a/include/linux/sunrpc/sched.h
63646+++ b/include/linux/sunrpc/sched.h
63647@@ -105,6 +105,7 @@ struct rpc_call_ops {
63648 void (*rpc_call_done)(struct rpc_task *, void *);
63649 void (*rpc_release)(void *);
63650 };
63651+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
63652
63653 struct rpc_task_setup {
63654 struct rpc_task *task;
63655diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
63656index c14fe86..393245e 100644
63657--- a/include/linux/sunrpc/svc_rdma.h
63658+++ b/include/linux/sunrpc/svc_rdma.h
63659@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
63660 extern unsigned int svcrdma_max_requests;
63661 extern unsigned int svcrdma_max_req_size;
63662
63663-extern atomic_t rdma_stat_recv;
63664-extern atomic_t rdma_stat_read;
63665-extern atomic_t rdma_stat_write;
63666-extern atomic_t rdma_stat_sq_starve;
63667-extern atomic_t rdma_stat_rq_starve;
63668-extern atomic_t rdma_stat_rq_poll;
63669-extern atomic_t rdma_stat_rq_prod;
63670-extern atomic_t rdma_stat_sq_poll;
63671-extern atomic_t rdma_stat_sq_prod;
63672+extern atomic_unchecked_t rdma_stat_recv;
63673+extern atomic_unchecked_t rdma_stat_read;
63674+extern atomic_unchecked_t rdma_stat_write;
63675+extern atomic_unchecked_t rdma_stat_sq_starve;
63676+extern atomic_unchecked_t rdma_stat_rq_starve;
63677+extern atomic_unchecked_t rdma_stat_rq_poll;
63678+extern atomic_unchecked_t rdma_stat_rq_prod;
63679+extern atomic_unchecked_t rdma_stat_sq_poll;
63680+extern atomic_unchecked_t rdma_stat_sq_prod;
63681
63682 #define RPCRDMA_VERSION 1
63683
63684diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
63685index bb9127d..34ab358 100644
63686--- a/include/linux/sysctl.h
63687+++ b/include/linux/sysctl.h
63688@@ -155,7 +155,11 @@ enum
63689 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
63690 };
63691
63692-
63693+#ifdef CONFIG_PAX_SOFTMODE
63694+enum {
63695+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
63696+};
63697+#endif
63698
63699 /* CTL_VM names: */
63700 enum
63701@@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
63702
63703 extern int proc_dostring(struct ctl_table *, int,
63704 void __user *, size_t *, loff_t *);
63705+extern int proc_dostring_modpriv(struct ctl_table *, int,
63706+ void __user *, size_t *, loff_t *);
63707 extern int proc_dointvec(struct ctl_table *, int,
63708 void __user *, size_t *, loff_t *);
63709 extern int proc_dointvec_minmax(struct ctl_table *, int,
63710diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
63711index a71a292..51bd91d 100644
63712--- a/include/linux/tracehook.h
63713+++ b/include/linux/tracehook.h
63714@@ -54,12 +54,12 @@ struct linux_binprm;
63715 /*
63716 * ptrace report for syscall entry and exit looks identical.
63717 */
63718-static inline void ptrace_report_syscall(struct pt_regs *regs)
63719+static inline int ptrace_report_syscall(struct pt_regs *regs)
63720 {
63721 int ptrace = current->ptrace;
63722
63723 if (!(ptrace & PT_PTRACED))
63724- return;
63725+ return 0;
63726
63727 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
63728
63729@@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
63730 send_sig(current->exit_code, current, 1);
63731 current->exit_code = 0;
63732 }
63733+
63734+ return fatal_signal_pending(current);
63735 }
63736
63737 /**
63738@@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
63739 static inline __must_check int tracehook_report_syscall_entry(
63740 struct pt_regs *regs)
63741 {
63742- ptrace_report_syscall(regs);
63743- return 0;
63744+ return ptrace_report_syscall(regs);
63745 }
63746
63747 /**
63748diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
63749index ff7dc08..893e1bd 100644
63750--- a/include/linux/tty_ldisc.h
63751+++ b/include/linux/tty_ldisc.h
63752@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
63753
63754 struct module *owner;
63755
63756- int refcount;
63757+ atomic_t refcount;
63758 };
63759
63760 struct tty_ldisc {
63761diff --git a/include/linux/types.h b/include/linux/types.h
63762index e5fa503..df6e8a4 100644
63763--- a/include/linux/types.h
63764+++ b/include/linux/types.h
63765@@ -214,10 +214,26 @@ typedef struct {
63766 int counter;
63767 } atomic_t;
63768
63769+#ifdef CONFIG_PAX_REFCOUNT
63770+typedef struct {
63771+ int counter;
63772+} atomic_unchecked_t;
63773+#else
63774+typedef atomic_t atomic_unchecked_t;
63775+#endif
63776+
63777 #ifdef CONFIG_64BIT
63778 typedef struct {
63779 long counter;
63780 } atomic64_t;
63781+
63782+#ifdef CONFIG_PAX_REFCOUNT
63783+typedef struct {
63784+ long counter;
63785+} atomic64_unchecked_t;
63786+#else
63787+typedef atomic64_t atomic64_unchecked_t;
63788+#endif
63789 #endif
63790
63791 struct list_head {
63792diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
63793index 5ca0951..53a2fff 100644
63794--- a/include/linux/uaccess.h
63795+++ b/include/linux/uaccess.h
63796@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
63797 long ret; \
63798 mm_segment_t old_fs = get_fs(); \
63799 \
63800- set_fs(KERNEL_DS); \
63801 pagefault_disable(); \
63802- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
63803- pagefault_enable(); \
63804+ set_fs(KERNEL_DS); \
63805+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
63806 set_fs(old_fs); \
63807+ pagefault_enable(); \
63808 ret; \
63809 })
63810
63811@@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
63812 * Safely write to address @dst from the buffer at @src. If a kernel fault
63813 * happens, handle that and return -EFAULT.
63814 */
63815-extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
63816+extern long notrace probe_kernel_write(void *dst, const void *src, size_t size) __size_overflow(3);
63817 extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
63818
63819 #endif /* __LINUX_UACCESS_H__ */
63820diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63821index 99c1b4d..bb94261 100644
63822--- a/include/linux/unaligned/access_ok.h
63823+++ b/include/linux/unaligned/access_ok.h
63824@@ -6,32 +6,32 @@
63825
63826 static inline u16 get_unaligned_le16(const void *p)
63827 {
63828- return le16_to_cpup((__le16 *)p);
63829+ return le16_to_cpup((const __le16 *)p);
63830 }
63831
63832 static inline u32 get_unaligned_le32(const void *p)
63833 {
63834- return le32_to_cpup((__le32 *)p);
63835+ return le32_to_cpup((const __le32 *)p);
63836 }
63837
63838 static inline u64 get_unaligned_le64(const void *p)
63839 {
63840- return le64_to_cpup((__le64 *)p);
63841+ return le64_to_cpup((const __le64 *)p);
63842 }
63843
63844 static inline u16 get_unaligned_be16(const void *p)
63845 {
63846- return be16_to_cpup((__be16 *)p);
63847+ return be16_to_cpup((const __be16 *)p);
63848 }
63849
63850 static inline u32 get_unaligned_be32(const void *p)
63851 {
63852- return be32_to_cpup((__be32 *)p);
63853+ return be32_to_cpup((const __be32 *)p);
63854 }
63855
63856 static inline u64 get_unaligned_be64(const void *p)
63857 {
63858- return be64_to_cpup((__be64 *)p);
63859+ return be64_to_cpup((const __be64 *)p);
63860 }
63861
63862 static inline void put_unaligned_le16(u16 val, void *p)
63863diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
63864index 0d3f988..000f101 100644
63865--- a/include/linux/usb/renesas_usbhs.h
63866+++ b/include/linux/usb/renesas_usbhs.h
63867@@ -39,7 +39,7 @@ enum {
63868 */
63869 struct renesas_usbhs_driver_callback {
63870 int (*notify_hotplug)(struct platform_device *pdev);
63871-};
63872+} __no_const;
63873
63874 /*
63875 * callback functions for platform
63876@@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
63877 * VBUS control is needed for Host
63878 */
63879 int (*set_vbus)(struct platform_device *pdev, int enable);
63880-};
63881+} __no_const;
63882
63883 /*
63884 * parameters for renesas usbhs
63885diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63886index 6f8fbcf..8259001 100644
63887--- a/include/linux/vermagic.h
63888+++ b/include/linux/vermagic.h
63889@@ -25,9 +25,35 @@
63890 #define MODULE_ARCH_VERMAGIC ""
63891 #endif
63892
63893+#ifdef CONFIG_PAX_REFCOUNT
63894+#define MODULE_PAX_REFCOUNT "REFCOUNT "
63895+#else
63896+#define MODULE_PAX_REFCOUNT ""
63897+#endif
63898+
63899+#ifdef CONSTIFY_PLUGIN
63900+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63901+#else
63902+#define MODULE_CONSTIFY_PLUGIN ""
63903+#endif
63904+
63905+#ifdef STACKLEAK_PLUGIN
63906+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63907+#else
63908+#define MODULE_STACKLEAK_PLUGIN ""
63909+#endif
63910+
63911+#ifdef CONFIG_GRKERNSEC
63912+#define MODULE_GRSEC "GRSEC "
63913+#else
63914+#define MODULE_GRSEC ""
63915+#endif
63916+
63917 #define VERMAGIC_STRING \
63918 UTS_RELEASE " " \
63919 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63920 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63921- MODULE_ARCH_VERMAGIC
63922+ MODULE_ARCH_VERMAGIC \
63923+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63924+ MODULE_GRSEC
63925
63926diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63927index dcdfc2b..cce598d 100644
63928--- a/include/linux/vmalloc.h
63929+++ b/include/linux/vmalloc.h
63930@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
63931 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63932 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
63933 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
63934+
63935+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
63936+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
63937+#endif
63938+
63939 /* bits [20..32] reserved for arch specific ioremap internals */
63940
63941 /*
63942@@ -51,18 +56,18 @@ static inline void vmalloc_init(void)
63943 }
63944 #endif
63945
63946-extern void *vmalloc(unsigned long size);
63947-extern void *vzalloc(unsigned long size);
63948-extern void *vmalloc_user(unsigned long size);
63949-extern void *vmalloc_node(unsigned long size, int node);
63950-extern void *vzalloc_node(unsigned long size, int node);
63951-extern void *vmalloc_exec(unsigned long size);
63952-extern void *vmalloc_32(unsigned long size);
63953-extern void *vmalloc_32_user(unsigned long size);
63954-extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
63955+extern void *vmalloc(unsigned long size) __size_overflow(1);
63956+extern void *vzalloc(unsigned long size) __size_overflow(1);
63957+extern void *vmalloc_user(unsigned long size) __size_overflow(1);
63958+extern void *vmalloc_node(unsigned long size, int node) __size_overflow(1);
63959+extern void *vzalloc_node(unsigned long size, int node) __size_overflow(1);
63960+extern void *vmalloc_exec(unsigned long size) __size_overflow(1);
63961+extern void *vmalloc_32(unsigned long size) __size_overflow(1);
63962+extern void *vmalloc_32_user(unsigned long size) __size_overflow(1);
63963+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __size_overflow(1);
63964 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
63965 unsigned long start, unsigned long end, gfp_t gfp_mask,
63966- pgprot_t prot, int node, void *caller);
63967+ pgprot_t prot, int node, void *caller) __size_overflow(1);
63968 extern void vfree(const void *addr);
63969
63970 extern void *vmap(struct page **pages, unsigned int count,
63971@@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
63972 extern void free_vm_area(struct vm_struct *area);
63973
63974 /* for /dev/kmem */
63975-extern long vread(char *buf, char *addr, unsigned long count);
63976-extern long vwrite(char *buf, char *addr, unsigned long count);
63977+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
63978+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
63979
63980 /*
63981 * Internals. Dont't use..
63982diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63983index 65efb92..137adbb 100644
63984--- a/include/linux/vmstat.h
63985+++ b/include/linux/vmstat.h
63986@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63987 /*
63988 * Zone based page accounting with per cpu differentials.
63989 */
63990-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63991+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63992
63993 static inline void zone_page_state_add(long x, struct zone *zone,
63994 enum zone_stat_item item)
63995 {
63996- atomic_long_add(x, &zone->vm_stat[item]);
63997- atomic_long_add(x, &vm_stat[item]);
63998+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63999+ atomic_long_add_unchecked(x, &vm_stat[item]);
64000 }
64001
64002 static inline unsigned long global_page_state(enum zone_stat_item item)
64003 {
64004- long x = atomic_long_read(&vm_stat[item]);
64005+ long x = atomic_long_read_unchecked(&vm_stat[item]);
64006 #ifdef CONFIG_SMP
64007 if (x < 0)
64008 x = 0;
64009@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
64010 static inline unsigned long zone_page_state(struct zone *zone,
64011 enum zone_stat_item item)
64012 {
64013- long x = atomic_long_read(&zone->vm_stat[item]);
64014+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64015 #ifdef CONFIG_SMP
64016 if (x < 0)
64017 x = 0;
64018@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
64019 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
64020 enum zone_stat_item item)
64021 {
64022- long x = atomic_long_read(&zone->vm_stat[item]);
64023+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64024
64025 #ifdef CONFIG_SMP
64026 int cpu;
64027@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
64028
64029 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
64030 {
64031- atomic_long_inc(&zone->vm_stat[item]);
64032- atomic_long_inc(&vm_stat[item]);
64033+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
64034+ atomic_long_inc_unchecked(&vm_stat[item]);
64035 }
64036
64037 static inline void __inc_zone_page_state(struct page *page,
64038@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
64039
64040 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
64041 {
64042- atomic_long_dec(&zone->vm_stat[item]);
64043- atomic_long_dec(&vm_stat[item]);
64044+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
64045+ atomic_long_dec_unchecked(&vm_stat[item]);
64046 }
64047
64048 static inline void __dec_zone_page_state(struct page *page,
64049diff --git a/include/linux/xattr.h b/include/linux/xattr.h
64050index e5d1220..ef6e406 100644
64051--- a/include/linux/xattr.h
64052+++ b/include/linux/xattr.h
64053@@ -57,6 +57,11 @@
64054 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
64055 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
64056
64057+/* User namespace */
64058+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
64059+#define XATTR_PAX_FLAGS_SUFFIX "flags"
64060+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
64061+
64062 #ifdef __KERNEL__
64063
64064 #include <linux/types.h>
64065diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
64066index 4aeff96..b378cdc 100644
64067--- a/include/media/saa7146_vv.h
64068+++ b/include/media/saa7146_vv.h
64069@@ -163,7 +163,7 @@ struct saa7146_ext_vv
64070 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
64071
64072 /* the extension can override this */
64073- struct v4l2_ioctl_ops ops;
64074+ v4l2_ioctl_ops_no_const ops;
64075 /* pointer to the saa7146 core ops */
64076 const struct v4l2_ioctl_ops *core_ops;
64077
64078diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
64079index c7c40f1..4f01585 100644
64080--- a/include/media/v4l2-dev.h
64081+++ b/include/media/v4l2-dev.h
64082@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
64083
64084
64085 struct v4l2_file_operations {
64086- struct module *owner;
64087+ struct module * const owner;
64088 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
64089 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
64090 unsigned int (*poll) (struct file *, struct poll_table_struct *);
64091@@ -68,6 +68,7 @@ struct v4l2_file_operations {
64092 int (*open) (struct file *);
64093 int (*release) (struct file *);
64094 };
64095+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
64096
64097 /*
64098 * Newer version of video_device, handled by videodev2.c
64099diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
64100index 3f5d60f..44210ed 100644
64101--- a/include/media/v4l2-ioctl.h
64102+++ b/include/media/v4l2-ioctl.h
64103@@ -278,7 +278,7 @@ struct v4l2_ioctl_ops {
64104 long (*vidioc_default) (struct file *file, void *fh,
64105 bool valid_prio, int cmd, void *arg);
64106 };
64107-
64108+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
64109
64110 /* v4l debugging and diagnostics */
64111
64112diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
64113index 8d55251..dfe5b0a 100644
64114--- a/include/net/caif/caif_hsi.h
64115+++ b/include/net/caif/caif_hsi.h
64116@@ -98,7 +98,7 @@ struct cfhsi_drv {
64117 void (*rx_done_cb) (struct cfhsi_drv *drv);
64118 void (*wake_up_cb) (struct cfhsi_drv *drv);
64119 void (*wake_down_cb) (struct cfhsi_drv *drv);
64120-};
64121+} __no_const;
64122
64123 /* Structure implemented by HSI device. */
64124 struct cfhsi_dev {
64125diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
64126index 9e5425b..8136ffc 100644
64127--- a/include/net/caif/cfctrl.h
64128+++ b/include/net/caif/cfctrl.h
64129@@ -52,7 +52,7 @@ struct cfctrl_rsp {
64130 void (*radioset_rsp)(void);
64131 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
64132 struct cflayer *client_layer);
64133-};
64134+} __no_const;
64135
64136 /* Link Setup Parameters for CAIF-Links. */
64137 struct cfctrl_link_param {
64138@@ -101,8 +101,8 @@ struct cfctrl_request_info {
64139 struct cfctrl {
64140 struct cfsrvl serv;
64141 struct cfctrl_rsp res;
64142- atomic_t req_seq_no;
64143- atomic_t rsp_seq_no;
64144+ atomic_unchecked_t req_seq_no;
64145+ atomic_unchecked_t rsp_seq_no;
64146 struct list_head list;
64147 /* Protects from simultaneous access to first_req list */
64148 spinlock_t info_list_lock;
64149diff --git a/include/net/flow.h b/include/net/flow.h
64150index 6c469db..7743b8e 100644
64151--- a/include/net/flow.h
64152+++ b/include/net/flow.h
64153@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
64154
64155 extern void flow_cache_flush(void);
64156 extern void flow_cache_flush_deferred(void);
64157-extern atomic_t flow_cache_genid;
64158+extern atomic_unchecked_t flow_cache_genid;
64159
64160 #endif
64161diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
64162index b94765e..053f68b 100644
64163--- a/include/net/inetpeer.h
64164+++ b/include/net/inetpeer.h
64165@@ -48,8 +48,8 @@ struct inet_peer {
64166 */
64167 union {
64168 struct {
64169- atomic_t rid; /* Frag reception counter */
64170- atomic_t ip_id_count; /* IP ID for the next packet */
64171+ atomic_unchecked_t rid; /* Frag reception counter */
64172+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
64173 __u32 tcp_ts;
64174 __u32 tcp_ts_stamp;
64175 };
64176@@ -115,11 +115,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
64177 more++;
64178 inet_peer_refcheck(p);
64179 do {
64180- old = atomic_read(&p->ip_id_count);
64181+ old = atomic_read_unchecked(&p->ip_id_count);
64182 new = old + more;
64183 if (!new)
64184 new = 1;
64185- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
64186+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
64187 return new;
64188 }
64189
64190diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
64191index 10422ef..662570f 100644
64192--- a/include/net/ip_fib.h
64193+++ b/include/net/ip_fib.h
64194@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
64195
64196 #define FIB_RES_SADDR(net, res) \
64197 ((FIB_RES_NH(res).nh_saddr_genid == \
64198- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
64199+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
64200 FIB_RES_NH(res).nh_saddr : \
64201 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
64202 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
64203diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
64204index ebe517f..1bd286b 100644
64205--- a/include/net/ip_vs.h
64206+++ b/include/net/ip_vs.h
64207@@ -509,7 +509,7 @@ struct ip_vs_conn {
64208 struct ip_vs_conn *control; /* Master control connection */
64209 atomic_t n_control; /* Number of controlled ones */
64210 struct ip_vs_dest *dest; /* real server */
64211- atomic_t in_pkts; /* incoming packet counter */
64212+ atomic_unchecked_t in_pkts; /* incoming packet counter */
64213
64214 /* packet transmitter for different forwarding methods. If it
64215 mangles the packet, it must return NF_DROP or better NF_STOLEN,
64216@@ -647,7 +647,7 @@ struct ip_vs_dest {
64217 __be16 port; /* port number of the server */
64218 union nf_inet_addr addr; /* IP address of the server */
64219 volatile unsigned flags; /* dest status flags */
64220- atomic_t conn_flags; /* flags to copy to conn */
64221+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
64222 atomic_t weight; /* server weight */
64223
64224 atomic_t refcnt; /* reference counter */
64225diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
64226index 69b610a..fe3962c 100644
64227--- a/include/net/irda/ircomm_core.h
64228+++ b/include/net/irda/ircomm_core.h
64229@@ -51,7 +51,7 @@ typedef struct {
64230 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
64231 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
64232 struct ircomm_info *);
64233-} call_t;
64234+} __no_const call_t;
64235
64236 struct ircomm_cb {
64237 irda_queue_t queue;
64238diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
64239index 59ba38bc..d515662 100644
64240--- a/include/net/irda/ircomm_tty.h
64241+++ b/include/net/irda/ircomm_tty.h
64242@@ -35,6 +35,7 @@
64243 #include <linux/termios.h>
64244 #include <linux/timer.h>
64245 #include <linux/tty.h> /* struct tty_struct */
64246+#include <asm/local.h>
64247
64248 #include <net/irda/irias_object.h>
64249 #include <net/irda/ircomm_core.h>
64250@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
64251 unsigned short close_delay;
64252 unsigned short closing_wait; /* time to wait before closing */
64253
64254- int open_count;
64255- int blocked_open; /* # of blocked opens */
64256+ local_t open_count;
64257+ local_t blocked_open; /* # of blocked opens */
64258
64259 /* Protect concurent access to :
64260 * o self->open_count
64261diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
64262index 0954ec9..7413562 100644
64263--- a/include/net/iucv/af_iucv.h
64264+++ b/include/net/iucv/af_iucv.h
64265@@ -138,7 +138,7 @@ struct iucv_sock {
64266 struct iucv_sock_list {
64267 struct hlist_head head;
64268 rwlock_t lock;
64269- atomic_t autobind_name;
64270+ atomic_unchecked_t autobind_name;
64271 };
64272
64273 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
64274diff --git a/include/net/neighbour.h b/include/net/neighbour.h
64275index 34c996f..bb3b4d4 100644
64276--- a/include/net/neighbour.h
64277+++ b/include/net/neighbour.h
64278@@ -123,7 +123,7 @@ struct neigh_ops {
64279 void (*error_report)(struct neighbour *, struct sk_buff *);
64280 int (*output)(struct neighbour *, struct sk_buff *);
64281 int (*connected_output)(struct neighbour *, struct sk_buff *);
64282-};
64283+} __do_const;
64284
64285 struct pneigh_entry {
64286 struct pneigh_entry *next;
64287diff --git a/include/net/netlink.h b/include/net/netlink.h
64288index cb1f350..3279d2c 100644
64289--- a/include/net/netlink.h
64290+++ b/include/net/netlink.h
64291@@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
64292 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
64293 {
64294 if (mark)
64295- skb_trim(skb, (unsigned char *) mark - skb->data);
64296+ skb_trim(skb, (const unsigned char *) mark - skb->data);
64297 }
64298
64299 /**
64300diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
64301index bbd023a..97c6d0d 100644
64302--- a/include/net/netns/ipv4.h
64303+++ b/include/net/netns/ipv4.h
64304@@ -57,8 +57,8 @@ struct netns_ipv4 {
64305 unsigned int sysctl_ping_group_range[2];
64306 long sysctl_tcp_mem[3];
64307
64308- atomic_t rt_genid;
64309- atomic_t dev_addr_genid;
64310+ atomic_unchecked_t rt_genid;
64311+ atomic_unchecked_t dev_addr_genid;
64312
64313 #ifdef CONFIG_IP_MROUTE
64314 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
64315diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
64316index d368561..96aaa17 100644
64317--- a/include/net/sctp/sctp.h
64318+++ b/include/net/sctp/sctp.h
64319@@ -318,9 +318,9 @@ do { \
64320
64321 #else /* SCTP_DEBUG */
64322
64323-#define SCTP_DEBUG_PRINTK(whatever...)
64324-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
64325-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
64326+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
64327+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
64328+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
64329 #define SCTP_ENABLE_DEBUG
64330 #define SCTP_DISABLE_DEBUG
64331 #define SCTP_ASSERT(expr, str, func)
64332diff --git a/include/net/sock.h b/include/net/sock.h
64333index 91c1c8b..15ae923 100644
64334--- a/include/net/sock.h
64335+++ b/include/net/sock.h
64336@@ -299,7 +299,7 @@ struct sock {
64337 #ifdef CONFIG_RPS
64338 __u32 sk_rxhash;
64339 #endif
64340- atomic_t sk_drops;
64341+ atomic_unchecked_t sk_drops;
64342 int sk_rcvbuf;
64343
64344 struct sk_filter __rcu *sk_filter;
64345@@ -1660,7 +1660,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
64346 }
64347
64348 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
64349- char __user *from, char *to,
64350+ char __user *from, unsigned char *to,
64351 int copy, int offset)
64352 {
64353 if (skb->ip_summed == CHECKSUM_NONE) {
64354diff --git a/include/net/tcp.h b/include/net/tcp.h
64355index 2d80c29..aa07caf 100644
64356--- a/include/net/tcp.h
64357+++ b/include/net/tcp.h
64358@@ -1426,7 +1426,7 @@ struct tcp_seq_afinfo {
64359 char *name;
64360 sa_family_t family;
64361 const struct file_operations *seq_fops;
64362- struct seq_operations seq_ops;
64363+ seq_operations_no_const seq_ops;
64364 };
64365
64366 struct tcp_iter_state {
64367diff --git a/include/net/udp.h b/include/net/udp.h
64368index e39592f..fef9680 100644
64369--- a/include/net/udp.h
64370+++ b/include/net/udp.h
64371@@ -243,7 +243,7 @@ struct udp_seq_afinfo {
64372 sa_family_t family;
64373 struct udp_table *udp_table;
64374 const struct file_operations *seq_fops;
64375- struct seq_operations seq_ops;
64376+ seq_operations_no_const seq_ops;
64377 };
64378
64379 struct udp_iter_state {
64380diff --git a/include/net/xfrm.h b/include/net/xfrm.h
64381index 89174e2..1f82598 100644
64382--- a/include/net/xfrm.h
64383+++ b/include/net/xfrm.h
64384@@ -505,7 +505,7 @@ struct xfrm_policy {
64385 struct timer_list timer;
64386
64387 struct flow_cache_object flo;
64388- atomic_t genid;
64389+ atomic_unchecked_t genid;
64390 u32 priority;
64391 u32 index;
64392 struct xfrm_mark mark;
64393diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
64394index 1a046b1..ee0bef0 100644
64395--- a/include/rdma/iw_cm.h
64396+++ b/include/rdma/iw_cm.h
64397@@ -122,7 +122,7 @@ struct iw_cm_verbs {
64398 int backlog);
64399
64400 int (*destroy_listen)(struct iw_cm_id *cm_id);
64401-};
64402+} __no_const;
64403
64404 /**
64405 * iw_create_cm_id - Create an IW CM identifier.
64406diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
64407index 6a3922f..0b73022 100644
64408--- a/include/scsi/libfc.h
64409+++ b/include/scsi/libfc.h
64410@@ -748,6 +748,7 @@ struct libfc_function_template {
64411 */
64412 void (*disc_stop_final) (struct fc_lport *);
64413 };
64414+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
64415
64416 /**
64417 * struct fc_disc - Discovery context
64418@@ -851,7 +852,7 @@ struct fc_lport {
64419 struct fc_vport *vport;
64420
64421 /* Operational Information */
64422- struct libfc_function_template tt;
64423+ libfc_function_template_no_const tt;
64424 u8 link_up;
64425 u8 qfull;
64426 enum fc_lport_state state;
64427diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
64428index 77273f2..dd4031f 100644
64429--- a/include/scsi/scsi_device.h
64430+++ b/include/scsi/scsi_device.h
64431@@ -161,9 +161,9 @@ struct scsi_device {
64432 unsigned int max_device_blocked; /* what device_blocked counts down from */
64433 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
64434
64435- atomic_t iorequest_cnt;
64436- atomic_t iodone_cnt;
64437- atomic_t ioerr_cnt;
64438+ atomic_unchecked_t iorequest_cnt;
64439+ atomic_unchecked_t iodone_cnt;
64440+ atomic_unchecked_t ioerr_cnt;
64441
64442 struct device sdev_gendev,
64443 sdev_dev;
64444diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
64445index 2a65167..91e01f8 100644
64446--- a/include/scsi/scsi_transport_fc.h
64447+++ b/include/scsi/scsi_transport_fc.h
64448@@ -711,7 +711,7 @@ struct fc_function_template {
64449 unsigned long show_host_system_hostname:1;
64450
64451 unsigned long disable_target_scan:1;
64452-};
64453+} __do_const;
64454
64455
64456 /**
64457diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
64458index 030b87c..98a6954 100644
64459--- a/include/sound/ak4xxx-adda.h
64460+++ b/include/sound/ak4xxx-adda.h
64461@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
64462 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
64463 unsigned char val);
64464 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
64465-};
64466+} __no_const;
64467
64468 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
64469
64470diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
64471index 8c05e47..2b5df97 100644
64472--- a/include/sound/hwdep.h
64473+++ b/include/sound/hwdep.h
64474@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
64475 struct snd_hwdep_dsp_status *status);
64476 int (*dsp_load)(struct snd_hwdep *hw,
64477 struct snd_hwdep_dsp_image *image);
64478-};
64479+} __no_const;
64480
64481 struct snd_hwdep {
64482 struct snd_card *card;
64483diff --git a/include/sound/info.h b/include/sound/info.h
64484index 9ca1a49..aba1728 100644
64485--- a/include/sound/info.h
64486+++ b/include/sound/info.h
64487@@ -44,7 +44,7 @@ struct snd_info_entry_text {
64488 struct snd_info_buffer *buffer);
64489 void (*write)(struct snd_info_entry *entry,
64490 struct snd_info_buffer *buffer);
64491-};
64492+} __no_const;
64493
64494 struct snd_info_entry_ops {
64495 int (*open)(struct snd_info_entry *entry,
64496diff --git a/include/sound/pcm.h b/include/sound/pcm.h
64497index 0cf91b2..b70cae4 100644
64498--- a/include/sound/pcm.h
64499+++ b/include/sound/pcm.h
64500@@ -81,6 +81,7 @@ struct snd_pcm_ops {
64501 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
64502 int (*ack)(struct snd_pcm_substream *substream);
64503 };
64504+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
64505
64506 /*
64507 *
64508diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
64509index af1b49e..a5d55a5 100644
64510--- a/include/sound/sb16_csp.h
64511+++ b/include/sound/sb16_csp.h
64512@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
64513 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
64514 int (*csp_stop) (struct snd_sb_csp * p);
64515 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
64516-};
64517+} __no_const;
64518
64519 /*
64520 * CSP private data
64521diff --git a/include/sound/soc.h b/include/sound/soc.h
64522index 0992dff..bb366fe 100644
64523--- a/include/sound/soc.h
64524+++ b/include/sound/soc.h
64525@@ -682,7 +682,7 @@ struct snd_soc_platform_driver {
64526 /* platform IO - used for platform DAPM */
64527 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
64528 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
64529-};
64530+} __do_const;
64531
64532 struct snd_soc_platform {
64533 const char *name;
64534@@ -852,7 +852,7 @@ struct snd_soc_pcm_runtime {
64535 struct snd_soc_dai_link *dai_link;
64536 struct mutex pcm_mutex;
64537 enum snd_soc_pcm_subclass pcm_subclass;
64538- struct snd_pcm_ops ops;
64539+ snd_pcm_ops_no_const ops;
64540
64541 unsigned int complete:1;
64542 unsigned int dev_registered:1;
64543diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
64544index 444cd6b..3327cc5 100644
64545--- a/include/sound/ymfpci.h
64546+++ b/include/sound/ymfpci.h
64547@@ -358,7 +358,7 @@ struct snd_ymfpci {
64548 spinlock_t reg_lock;
64549 spinlock_t voice_lock;
64550 wait_queue_head_t interrupt_sleep;
64551- atomic_t interrupt_sleep_count;
64552+ atomic_unchecked_t interrupt_sleep_count;
64553 struct snd_info_entry *proc_entry;
64554 const struct firmware *dsp_microcode;
64555 const struct firmware *controller_microcode;
64556diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
64557index fe73eb8..56388b1 100644
64558--- a/include/target/target_core_base.h
64559+++ b/include/target/target_core_base.h
64560@@ -443,7 +443,7 @@ struct t10_reservation_ops {
64561 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
64562 int (*t10_pr_register)(struct se_cmd *);
64563 int (*t10_pr_clear)(struct se_cmd *);
64564-};
64565+} __no_const;
64566
64567 struct t10_reservation {
64568 /* Reservation effects all target ports */
64569@@ -561,8 +561,8 @@ struct se_cmd {
64570 atomic_t t_se_count;
64571 atomic_t t_task_cdbs_left;
64572 atomic_t t_task_cdbs_ex_left;
64573- atomic_t t_task_cdbs_sent;
64574- atomic_t t_transport_aborted;
64575+ atomic_unchecked_t t_task_cdbs_sent;
64576+ atomic_unchecked_t t_transport_aborted;
64577 atomic_t t_transport_active;
64578 atomic_t t_transport_complete;
64579 atomic_t t_transport_queue_active;
64580@@ -799,7 +799,7 @@ struct se_device {
64581 spinlock_t stats_lock;
64582 /* Active commands on this virtual SE device */
64583 atomic_t simple_cmds;
64584- atomic_t dev_ordered_id;
64585+ atomic_unchecked_t dev_ordered_id;
64586 atomic_t execute_tasks;
64587 atomic_t dev_ordered_sync;
64588 atomic_t dev_qf_count;
64589diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
64590index 1c09820..7f5ec79 100644
64591--- a/include/trace/events/irq.h
64592+++ b/include/trace/events/irq.h
64593@@ -36,7 +36,7 @@ struct softirq_action;
64594 */
64595 TRACE_EVENT(irq_handler_entry,
64596
64597- TP_PROTO(int irq, struct irqaction *action),
64598+ TP_PROTO(int irq, const struct irqaction *action),
64599
64600 TP_ARGS(irq, action),
64601
64602@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
64603 */
64604 TRACE_EVENT(irq_handler_exit,
64605
64606- TP_PROTO(int irq, struct irqaction *action, int ret),
64607+ TP_PROTO(int irq, const struct irqaction *action, int ret),
64608
64609 TP_ARGS(irq, action, ret),
64610
64611diff --git a/include/video/udlfb.h b/include/video/udlfb.h
64612index c41f308..6918de3 100644
64613--- a/include/video/udlfb.h
64614+++ b/include/video/udlfb.h
64615@@ -52,10 +52,10 @@ struct dlfb_data {
64616 u32 pseudo_palette[256];
64617 int blank_mode; /*one of FB_BLANK_ */
64618 /* blit-only rendering path metrics, exposed through sysfs */
64619- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64620- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
64621- atomic_t bytes_sent; /* to usb, after compression including overhead */
64622- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
64623+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64624+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
64625+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
64626+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
64627 };
64628
64629 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
64630diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
64631index 0993a22..32ba2fe 100644
64632--- a/include/video/uvesafb.h
64633+++ b/include/video/uvesafb.h
64634@@ -177,6 +177,7 @@ struct uvesafb_par {
64635 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
64636 u8 pmi_setpal; /* PMI for palette changes */
64637 u16 *pmi_base; /* protected mode interface location */
64638+ u8 *pmi_code; /* protected mode code location */
64639 void *pmi_start;
64640 void *pmi_pal;
64641 u8 *vbe_state_orig; /*
64642diff --git a/init/Kconfig b/init/Kconfig
64643index 3f42cd6..613f41d 100644
64644--- a/init/Kconfig
64645+++ b/init/Kconfig
64646@@ -799,6 +799,7 @@ endif # CGROUPS
64647
64648 config CHECKPOINT_RESTORE
64649 bool "Checkpoint/restore support" if EXPERT
64650+ depends on !GRKERNSEC
64651 default n
64652 help
64653 Enables additional kernel features in a sake of checkpoint/restore.
64654@@ -1249,7 +1250,7 @@ config SLUB_DEBUG
64655
64656 config COMPAT_BRK
64657 bool "Disable heap randomization"
64658- default y
64659+ default n
64660 help
64661 Randomizing heap placement makes heap exploits harder, but it
64662 also breaks ancient binaries (including anything libc5 based).
64663diff --git a/init/do_mounts.c b/init/do_mounts.c
64664index 2974c8b..0b863ae 100644
64665--- a/init/do_mounts.c
64666+++ b/init/do_mounts.c
64667@@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
64668 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
64669 {
64670 struct super_block *s;
64671- int err = sys_mount(name, "/root", fs, flags, data);
64672+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
64673 if (err)
64674 return err;
64675
64676- sys_chdir((const char __user __force *)"/root");
64677+ sys_chdir((const char __force_user *)"/root");
64678 s = current->fs->pwd.dentry->d_sb;
64679 ROOT_DEV = s->s_dev;
64680 printk(KERN_INFO
64681@@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
64682 va_start(args, fmt);
64683 vsprintf(buf, fmt, args);
64684 va_end(args);
64685- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
64686+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
64687 if (fd >= 0) {
64688 sys_ioctl(fd, FDEJECT, 0);
64689 sys_close(fd);
64690 }
64691 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
64692- fd = sys_open("/dev/console", O_RDWR, 0);
64693+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
64694 if (fd >= 0) {
64695 sys_ioctl(fd, TCGETS, (long)&termios);
64696 termios.c_lflag &= ~ICANON;
64697 sys_ioctl(fd, TCSETSF, (long)&termios);
64698- sys_read(fd, &c, 1);
64699+ sys_read(fd, (char __user *)&c, 1);
64700 termios.c_lflag |= ICANON;
64701 sys_ioctl(fd, TCSETSF, (long)&termios);
64702 sys_close(fd);
64703@@ -555,6 +555,6 @@ void __init prepare_namespace(void)
64704 mount_root();
64705 out:
64706 devtmpfs_mount("dev");
64707- sys_mount(".", "/", NULL, MS_MOVE, NULL);
64708- sys_chroot((const char __user __force *)".");
64709+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64710+ sys_chroot((const char __force_user *)".");
64711 }
64712diff --git a/init/do_mounts.h b/init/do_mounts.h
64713index f5b978a..69dbfe8 100644
64714--- a/init/do_mounts.h
64715+++ b/init/do_mounts.h
64716@@ -15,15 +15,15 @@ extern int root_mountflags;
64717
64718 static inline int create_dev(char *name, dev_t dev)
64719 {
64720- sys_unlink(name);
64721- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
64722+ sys_unlink((char __force_user *)name);
64723+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
64724 }
64725
64726 #if BITS_PER_LONG == 32
64727 static inline u32 bstat(char *name)
64728 {
64729 struct stat64 stat;
64730- if (sys_stat64(name, &stat) != 0)
64731+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
64732 return 0;
64733 if (!S_ISBLK(stat.st_mode))
64734 return 0;
64735@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
64736 static inline u32 bstat(char *name)
64737 {
64738 struct stat stat;
64739- if (sys_newstat(name, &stat) != 0)
64740+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
64741 return 0;
64742 if (!S_ISBLK(stat.st_mode))
64743 return 0;
64744diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
64745index 3098a38..253064e 100644
64746--- a/init/do_mounts_initrd.c
64747+++ b/init/do_mounts_initrd.c
64748@@ -44,13 +44,13 @@ static void __init handle_initrd(void)
64749 create_dev("/dev/root.old", Root_RAM0);
64750 /* mount initrd on rootfs' /root */
64751 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
64752- sys_mkdir("/old", 0700);
64753- root_fd = sys_open("/", 0, 0);
64754- old_fd = sys_open("/old", 0, 0);
64755+ sys_mkdir((const char __force_user *)"/old", 0700);
64756+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
64757+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
64758 /* move initrd over / and chdir/chroot in initrd root */
64759- sys_chdir("/root");
64760- sys_mount(".", "/", NULL, MS_MOVE, NULL);
64761- sys_chroot(".");
64762+ sys_chdir((const char __force_user *)"/root");
64763+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64764+ sys_chroot((const char __force_user *)".");
64765
64766 /*
64767 * In case that a resume from disk is carried out by linuxrc or one of
64768@@ -67,15 +67,15 @@ static void __init handle_initrd(void)
64769
64770 /* move initrd to rootfs' /old */
64771 sys_fchdir(old_fd);
64772- sys_mount("/", ".", NULL, MS_MOVE, NULL);
64773+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
64774 /* switch root and cwd back to / of rootfs */
64775 sys_fchdir(root_fd);
64776- sys_chroot(".");
64777+ sys_chroot((const char __force_user *)".");
64778 sys_close(old_fd);
64779 sys_close(root_fd);
64780
64781 if (new_decode_dev(real_root_dev) == Root_RAM0) {
64782- sys_chdir("/old");
64783+ sys_chdir((const char __force_user *)"/old");
64784 return;
64785 }
64786
64787@@ -83,17 +83,17 @@ static void __init handle_initrd(void)
64788 mount_root();
64789
64790 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64791- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
64792+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
64793 if (!error)
64794 printk("okay\n");
64795 else {
64796- int fd = sys_open("/dev/root.old", O_RDWR, 0);
64797+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
64798 if (error == -ENOENT)
64799 printk("/initrd does not exist. Ignored.\n");
64800 else
64801 printk("failed\n");
64802 printk(KERN_NOTICE "Unmounting old root\n");
64803- sys_umount("/old", MNT_DETACH);
64804+ sys_umount((char __force_user *)"/old", MNT_DETACH);
64805 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64806 if (fd < 0) {
64807 error = fd;
64808@@ -116,11 +116,11 @@ int __init initrd_load(void)
64809 * mounted in the normal path.
64810 */
64811 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64812- sys_unlink("/initrd.image");
64813+ sys_unlink((const char __force_user *)"/initrd.image");
64814 handle_initrd();
64815 return 1;
64816 }
64817 }
64818- sys_unlink("/initrd.image");
64819+ sys_unlink((const char __force_user *)"/initrd.image");
64820 return 0;
64821 }
64822diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64823index 32c4799..c27ee74 100644
64824--- a/init/do_mounts_md.c
64825+++ b/init/do_mounts_md.c
64826@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64827 partitioned ? "_d" : "", minor,
64828 md_setup_args[ent].device_names);
64829
64830- fd = sys_open(name, 0, 0);
64831+ fd = sys_open((char __force_user *)name, 0, 0);
64832 if (fd < 0) {
64833 printk(KERN_ERR "md: open failed - cannot start "
64834 "array %s\n", name);
64835@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64836 * array without it
64837 */
64838 sys_close(fd);
64839- fd = sys_open(name, 0, 0);
64840+ fd = sys_open((char __force_user *)name, 0, 0);
64841 sys_ioctl(fd, BLKRRPART, 0);
64842 }
64843 sys_close(fd);
64844@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64845
64846 wait_for_device_probe();
64847
64848- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64849+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64850 if (fd >= 0) {
64851 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64852 sys_close(fd);
64853diff --git a/init/initramfs.c b/init/initramfs.c
64854index 8216c30..25e8e32 100644
64855--- a/init/initramfs.c
64856+++ b/init/initramfs.c
64857@@ -74,7 +74,7 @@ static void __init free_hash(void)
64858 }
64859 }
64860
64861-static long __init do_utime(char __user *filename, time_t mtime)
64862+static long __init do_utime(__force char __user *filename, time_t mtime)
64863 {
64864 struct timespec t[2];
64865
64866@@ -109,7 +109,7 @@ static void __init dir_utime(void)
64867 struct dir_entry *de, *tmp;
64868 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64869 list_del(&de->list);
64870- do_utime(de->name, de->mtime);
64871+ do_utime((char __force_user *)de->name, de->mtime);
64872 kfree(de->name);
64873 kfree(de);
64874 }
64875@@ -271,7 +271,7 @@ static int __init maybe_link(void)
64876 if (nlink >= 2) {
64877 char *old = find_link(major, minor, ino, mode, collected);
64878 if (old)
64879- return (sys_link(old, collected) < 0) ? -1 : 1;
64880+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64881 }
64882 return 0;
64883 }
64884@@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
64885 {
64886 struct stat st;
64887
64888- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64889+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64890 if (S_ISDIR(st.st_mode))
64891- sys_rmdir(path);
64892+ sys_rmdir((char __force_user *)path);
64893 else
64894- sys_unlink(path);
64895+ sys_unlink((char __force_user *)path);
64896 }
64897 }
64898
64899@@ -305,7 +305,7 @@ static int __init do_name(void)
64900 int openflags = O_WRONLY|O_CREAT;
64901 if (ml != 1)
64902 openflags |= O_TRUNC;
64903- wfd = sys_open(collected, openflags, mode);
64904+ wfd = sys_open((char __force_user *)collected, openflags, mode);
64905
64906 if (wfd >= 0) {
64907 sys_fchown(wfd, uid, gid);
64908@@ -317,17 +317,17 @@ static int __init do_name(void)
64909 }
64910 }
64911 } else if (S_ISDIR(mode)) {
64912- sys_mkdir(collected, mode);
64913- sys_chown(collected, uid, gid);
64914- sys_chmod(collected, mode);
64915+ sys_mkdir((char __force_user *)collected, mode);
64916+ sys_chown((char __force_user *)collected, uid, gid);
64917+ sys_chmod((char __force_user *)collected, mode);
64918 dir_add(collected, mtime);
64919 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64920 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64921 if (maybe_link() == 0) {
64922- sys_mknod(collected, mode, rdev);
64923- sys_chown(collected, uid, gid);
64924- sys_chmod(collected, mode);
64925- do_utime(collected, mtime);
64926+ sys_mknod((char __force_user *)collected, mode, rdev);
64927+ sys_chown((char __force_user *)collected, uid, gid);
64928+ sys_chmod((char __force_user *)collected, mode);
64929+ do_utime((char __force_user *)collected, mtime);
64930 }
64931 }
64932 return 0;
64933@@ -336,15 +336,15 @@ static int __init do_name(void)
64934 static int __init do_copy(void)
64935 {
64936 if (count >= body_len) {
64937- sys_write(wfd, victim, body_len);
64938+ sys_write(wfd, (char __force_user *)victim, body_len);
64939 sys_close(wfd);
64940- do_utime(vcollected, mtime);
64941+ do_utime((char __force_user *)vcollected, mtime);
64942 kfree(vcollected);
64943 eat(body_len);
64944 state = SkipIt;
64945 return 0;
64946 } else {
64947- sys_write(wfd, victim, count);
64948+ sys_write(wfd, (char __force_user *)victim, count);
64949 body_len -= count;
64950 eat(count);
64951 return 1;
64952@@ -355,9 +355,9 @@ static int __init do_symlink(void)
64953 {
64954 collected[N_ALIGN(name_len) + body_len] = '\0';
64955 clean_path(collected, 0);
64956- sys_symlink(collected + N_ALIGN(name_len), collected);
64957- sys_lchown(collected, uid, gid);
64958- do_utime(collected, mtime);
64959+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64960+ sys_lchown((char __force_user *)collected, uid, gid);
64961+ do_utime((char __force_user *)collected, mtime);
64962 state = SkipIt;
64963 next_state = Reset;
64964 return 0;
64965diff --git a/init/main.c b/init/main.c
64966index ff49a6d..5fa0429 100644
64967--- a/init/main.c
64968+++ b/init/main.c
64969@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
64970 extern void tc_init(void);
64971 #endif
64972
64973+extern void grsecurity_init(void);
64974+
64975 /*
64976 * Debug helper: via this flag we know that we are in 'early bootup code'
64977 * where only the boot processor is running with IRQ disabled. This means
64978@@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
64979
64980 __setup("reset_devices", set_reset_devices);
64981
64982+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64983+extern char pax_enter_kernel_user[];
64984+extern char pax_exit_kernel_user[];
64985+extern pgdval_t clone_pgd_mask;
64986+#endif
64987+
64988+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64989+static int __init setup_pax_nouderef(char *str)
64990+{
64991+#ifdef CONFIG_X86_32
64992+ unsigned int cpu;
64993+ struct desc_struct *gdt;
64994+
64995+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
64996+ gdt = get_cpu_gdt_table(cpu);
64997+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64998+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64999+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
65000+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
65001+ }
65002+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
65003+#else
65004+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
65005+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
65006+ clone_pgd_mask = ~(pgdval_t)0UL;
65007+#endif
65008+
65009+ return 0;
65010+}
65011+early_param("pax_nouderef", setup_pax_nouderef);
65012+#endif
65013+
65014+#ifdef CONFIG_PAX_SOFTMODE
65015+int pax_softmode;
65016+
65017+static int __init setup_pax_softmode(char *str)
65018+{
65019+ get_option(&str, &pax_softmode);
65020+ return 1;
65021+}
65022+__setup("pax_softmode=", setup_pax_softmode);
65023+#endif
65024+
65025 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
65026 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
65027 static const char *panic_later, *panic_param;
65028@@ -675,6 +720,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
65029 {
65030 int count = preempt_count();
65031 int ret;
65032+ const char *msg1 = "", *msg2 = "";
65033
65034 if (initcall_debug)
65035 ret = do_one_initcall_debug(fn);
65036@@ -687,15 +733,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
65037 sprintf(msgbuf, "error code %d ", ret);
65038
65039 if (preempt_count() != count) {
65040- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
65041+ msg1 = " preemption imbalance";
65042 preempt_count() = count;
65043 }
65044 if (irqs_disabled()) {
65045- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
65046+ msg2 = " disabled interrupts";
65047 local_irq_enable();
65048 }
65049- if (msgbuf[0]) {
65050- printk("initcall %pF returned with %s\n", fn, msgbuf);
65051+ if (msgbuf[0] || *msg1 || *msg2) {
65052+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
65053 }
65054
65055 return ret;
65056@@ -814,7 +860,7 @@ static int __init kernel_init(void * unused)
65057 do_basic_setup();
65058
65059 /* Open the /dev/console on the rootfs, this should never fail */
65060- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
65061+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
65062 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
65063
65064 (void) sys_dup(0);
65065@@ -827,11 +873,13 @@ static int __init kernel_init(void * unused)
65066 if (!ramdisk_execute_command)
65067 ramdisk_execute_command = "/init";
65068
65069- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
65070+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
65071 ramdisk_execute_command = NULL;
65072 prepare_namespace();
65073 }
65074
65075+ grsecurity_init();
65076+
65077 /*
65078 * Ok, we have completed the initial bootup, and
65079 * we're essentially up and running. Get rid of the
65080diff --git a/ipc/mqueue.c b/ipc/mqueue.c
65081index 86ee272..773d937 100644
65082--- a/ipc/mqueue.c
65083+++ b/ipc/mqueue.c
65084@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
65085 mq_bytes = (mq_msg_tblsz +
65086 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
65087
65088+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
65089 spin_lock(&mq_lock);
65090 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
65091 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
65092diff --git a/ipc/msg.c b/ipc/msg.c
65093index 7385de2..a8180e08 100644
65094--- a/ipc/msg.c
65095+++ b/ipc/msg.c
65096@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
65097 return security_msg_queue_associate(msq, msgflg);
65098 }
65099
65100+static struct ipc_ops msg_ops = {
65101+ .getnew = newque,
65102+ .associate = msg_security,
65103+ .more_checks = NULL
65104+};
65105+
65106 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
65107 {
65108 struct ipc_namespace *ns;
65109- struct ipc_ops msg_ops;
65110 struct ipc_params msg_params;
65111
65112 ns = current->nsproxy->ipc_ns;
65113
65114- msg_ops.getnew = newque;
65115- msg_ops.associate = msg_security;
65116- msg_ops.more_checks = NULL;
65117-
65118 msg_params.key = key;
65119 msg_params.flg = msgflg;
65120
65121diff --git a/ipc/sem.c b/ipc/sem.c
65122index 5215a81..cfc0cac 100644
65123--- a/ipc/sem.c
65124+++ b/ipc/sem.c
65125@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
65126 return 0;
65127 }
65128
65129+static struct ipc_ops sem_ops = {
65130+ .getnew = newary,
65131+ .associate = sem_security,
65132+ .more_checks = sem_more_checks
65133+};
65134+
65135 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65136 {
65137 struct ipc_namespace *ns;
65138- struct ipc_ops sem_ops;
65139 struct ipc_params sem_params;
65140
65141 ns = current->nsproxy->ipc_ns;
65142@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65143 if (nsems < 0 || nsems > ns->sc_semmsl)
65144 return -EINVAL;
65145
65146- sem_ops.getnew = newary;
65147- sem_ops.associate = sem_security;
65148- sem_ops.more_checks = sem_more_checks;
65149-
65150 sem_params.key = key;
65151 sem_params.flg = semflg;
65152 sem_params.u.nsems = nsems;
65153diff --git a/ipc/shm.c b/ipc/shm.c
65154index b76be5b..859e750 100644
65155--- a/ipc/shm.c
65156+++ b/ipc/shm.c
65157@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
65158 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
65159 #endif
65160
65161+#ifdef CONFIG_GRKERNSEC
65162+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65163+ const time_t shm_createtime, const uid_t cuid,
65164+ const int shmid);
65165+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65166+ const time_t shm_createtime);
65167+#endif
65168+
65169 void shm_init_ns(struct ipc_namespace *ns)
65170 {
65171 ns->shm_ctlmax = SHMMAX;
65172@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
65173 shp->shm_lprid = 0;
65174 shp->shm_atim = shp->shm_dtim = 0;
65175 shp->shm_ctim = get_seconds();
65176+#ifdef CONFIG_GRKERNSEC
65177+ {
65178+ struct timespec timeval;
65179+ do_posix_clock_monotonic_gettime(&timeval);
65180+
65181+ shp->shm_createtime = timeval.tv_sec;
65182+ }
65183+#endif
65184 shp->shm_segsz = size;
65185 shp->shm_nattch = 0;
65186 shp->shm_file = file;
65187@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
65188 return 0;
65189 }
65190
65191+static struct ipc_ops shm_ops = {
65192+ .getnew = newseg,
65193+ .associate = shm_security,
65194+ .more_checks = shm_more_checks
65195+};
65196+
65197 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
65198 {
65199 struct ipc_namespace *ns;
65200- struct ipc_ops shm_ops;
65201 struct ipc_params shm_params;
65202
65203 ns = current->nsproxy->ipc_ns;
65204
65205- shm_ops.getnew = newseg;
65206- shm_ops.associate = shm_security;
65207- shm_ops.more_checks = shm_more_checks;
65208-
65209 shm_params.key = key;
65210 shm_params.flg = shmflg;
65211 shm_params.u.size = size;
65212@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65213 f_mode = FMODE_READ | FMODE_WRITE;
65214 }
65215 if (shmflg & SHM_EXEC) {
65216+
65217+#ifdef CONFIG_PAX_MPROTECT
65218+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
65219+ goto out;
65220+#endif
65221+
65222 prot |= PROT_EXEC;
65223 acc_mode |= S_IXUGO;
65224 }
65225@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65226 if (err)
65227 goto out_unlock;
65228
65229+#ifdef CONFIG_GRKERNSEC
65230+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
65231+ shp->shm_perm.cuid, shmid) ||
65232+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
65233+ err = -EACCES;
65234+ goto out_unlock;
65235+ }
65236+#endif
65237+
65238 path = shp->shm_file->f_path;
65239 path_get(&path);
65240 shp->shm_nattch++;
65241+#ifdef CONFIG_GRKERNSEC
65242+ shp->shm_lapid = current->pid;
65243+#endif
65244 size = i_size_read(path.dentry->d_inode);
65245 shm_unlock(shp);
65246
65247diff --git a/kernel/acct.c b/kernel/acct.c
65248index 02e6167..54824f7 100644
65249--- a/kernel/acct.c
65250+++ b/kernel/acct.c
65251@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
65252 */
65253 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
65254 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
65255- file->f_op->write(file, (char *)&ac,
65256+ file->f_op->write(file, (char __force_user *)&ac,
65257 sizeof(acct_t), &file->f_pos);
65258 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
65259 set_fs(fs);
65260diff --git a/kernel/audit.c b/kernel/audit.c
65261index bb0eb5b..cf2a03a 100644
65262--- a/kernel/audit.c
65263+++ b/kernel/audit.c
65264@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
65265 3) suppressed due to audit_rate_limit
65266 4) suppressed due to audit_backlog_limit
65267 */
65268-static atomic_t audit_lost = ATOMIC_INIT(0);
65269+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
65270
65271 /* The netlink socket. */
65272 static struct sock *audit_sock;
65273@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
65274 unsigned long now;
65275 int print;
65276
65277- atomic_inc(&audit_lost);
65278+ atomic_inc_unchecked(&audit_lost);
65279
65280 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
65281
65282@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
65283 printk(KERN_WARNING
65284 "audit: audit_lost=%d audit_rate_limit=%d "
65285 "audit_backlog_limit=%d\n",
65286- atomic_read(&audit_lost),
65287+ atomic_read_unchecked(&audit_lost),
65288 audit_rate_limit,
65289 audit_backlog_limit);
65290 audit_panic(message);
65291@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
65292 status_set.pid = audit_pid;
65293 status_set.rate_limit = audit_rate_limit;
65294 status_set.backlog_limit = audit_backlog_limit;
65295- status_set.lost = atomic_read(&audit_lost);
65296+ status_set.lost = atomic_read_unchecked(&audit_lost);
65297 status_set.backlog = skb_queue_len(&audit_skb_queue);
65298 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
65299 &status_set, sizeof(status_set));
65300diff --git a/kernel/auditsc.c b/kernel/auditsc.c
65301index af1de0f..06dfe57 100644
65302--- a/kernel/auditsc.c
65303+++ b/kernel/auditsc.c
65304@@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
65305 }
65306
65307 /* global counter which is incremented every time something logs in */
65308-static atomic_t session_id = ATOMIC_INIT(0);
65309+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
65310
65311 /**
65312 * audit_set_loginuid - set current task's audit_context loginuid
65313@@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
65314 return -EPERM;
65315 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
65316
65317- sessionid = atomic_inc_return(&session_id);
65318+ sessionid = atomic_inc_return_unchecked(&session_id);
65319 if (context && context->in_syscall) {
65320 struct audit_buffer *ab;
65321
65322diff --git a/kernel/capability.c b/kernel/capability.c
65323index 3f1adb6..c564db0 100644
65324--- a/kernel/capability.c
65325+++ b/kernel/capability.c
65326@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
65327 * before modification is attempted and the application
65328 * fails.
65329 */
65330+ if (tocopy > ARRAY_SIZE(kdata))
65331+ return -EFAULT;
65332+
65333 if (copy_to_user(dataptr, kdata, tocopy
65334 * sizeof(struct __user_cap_data_struct))) {
65335 return -EFAULT;
65336@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
65337 int ret;
65338
65339 rcu_read_lock();
65340- ret = security_capable(__task_cred(t), ns, cap);
65341+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
65342+ gr_task_is_capable(t, __task_cred(t), cap);
65343 rcu_read_unlock();
65344
65345- return (ret == 0);
65346+ return ret;
65347 }
65348
65349 /**
65350@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
65351 int ret;
65352
65353 rcu_read_lock();
65354- ret = security_capable_noaudit(__task_cred(t), ns, cap);
65355+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
65356 rcu_read_unlock();
65357
65358- return (ret == 0);
65359+ return ret;
65360 }
65361
65362 /**
65363@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
65364 BUG();
65365 }
65366
65367- if (security_capable(current_cred(), ns, cap) == 0) {
65368+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
65369 current->flags |= PF_SUPERPRIV;
65370 return true;
65371 }
65372@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
65373 }
65374 EXPORT_SYMBOL(ns_capable);
65375
65376+bool ns_capable_nolog(struct user_namespace *ns, int cap)
65377+{
65378+ if (unlikely(!cap_valid(cap))) {
65379+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
65380+ BUG();
65381+ }
65382+
65383+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
65384+ current->flags |= PF_SUPERPRIV;
65385+ return true;
65386+ }
65387+ return false;
65388+}
65389+EXPORT_SYMBOL(ns_capable_nolog);
65390+
65391 /**
65392 * capable - Determine if the current task has a superior capability in effect
65393 * @cap: The capability to be tested for
65394@@ -408,6 +427,12 @@ bool capable(int cap)
65395 }
65396 EXPORT_SYMBOL(capable);
65397
65398+bool capable_nolog(int cap)
65399+{
65400+ return ns_capable_nolog(&init_user_ns, cap);
65401+}
65402+EXPORT_SYMBOL(capable_nolog);
65403+
65404 /**
65405 * nsown_capable - Check superior capability to one's own user_ns
65406 * @cap: The capability in question
65407diff --git a/kernel/compat.c b/kernel/compat.c
65408index f346ced..aa2b1f4 100644
65409--- a/kernel/compat.c
65410+++ b/kernel/compat.c
65411@@ -13,6 +13,7 @@
65412
65413 #include <linux/linkage.h>
65414 #include <linux/compat.h>
65415+#include <linux/module.h>
65416 #include <linux/errno.h>
65417 #include <linux/time.h>
65418 #include <linux/signal.h>
65419@@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
65420 mm_segment_t oldfs;
65421 long ret;
65422
65423- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
65424+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
65425 oldfs = get_fs();
65426 set_fs(KERNEL_DS);
65427 ret = hrtimer_nanosleep_restart(restart);
65428@@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
65429 oldfs = get_fs();
65430 set_fs(KERNEL_DS);
65431 ret = hrtimer_nanosleep(&tu,
65432- rmtp ? (struct timespec __user *)&rmt : NULL,
65433+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
65434 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
65435 set_fs(oldfs);
65436
65437@@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
65438 mm_segment_t old_fs = get_fs();
65439
65440 set_fs(KERNEL_DS);
65441- ret = sys_sigpending((old_sigset_t __user *) &s);
65442+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
65443 set_fs(old_fs);
65444 if (ret == 0)
65445 ret = put_user(s, set);
65446@@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
65447 old_fs = get_fs();
65448 set_fs(KERNEL_DS);
65449 ret = sys_sigprocmask(how,
65450- set ? (old_sigset_t __user *) &s : NULL,
65451- oset ? (old_sigset_t __user *) &s : NULL);
65452+ set ? (old_sigset_t __force_user *) &s : NULL,
65453+ oset ? (old_sigset_t __force_user *) &s : NULL);
65454 set_fs(old_fs);
65455 if (ret == 0)
65456 if (oset)
65457@@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
65458 mm_segment_t old_fs = get_fs();
65459
65460 set_fs(KERNEL_DS);
65461- ret = sys_old_getrlimit(resource, &r);
65462+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
65463 set_fs(old_fs);
65464
65465 if (!ret) {
65466@@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
65467 mm_segment_t old_fs = get_fs();
65468
65469 set_fs(KERNEL_DS);
65470- ret = sys_getrusage(who, (struct rusage __user *) &r);
65471+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
65472 set_fs(old_fs);
65473
65474 if (ret)
65475@@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
65476 set_fs (KERNEL_DS);
65477 ret = sys_wait4(pid,
65478 (stat_addr ?
65479- (unsigned int __user *) &status : NULL),
65480- options, (struct rusage __user *) &r);
65481+ (unsigned int __force_user *) &status : NULL),
65482+ options, (struct rusage __force_user *) &r);
65483 set_fs (old_fs);
65484
65485 if (ret > 0) {
65486@@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
65487 memset(&info, 0, sizeof(info));
65488
65489 set_fs(KERNEL_DS);
65490- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
65491- uru ? (struct rusage __user *)&ru : NULL);
65492+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
65493+ uru ? (struct rusage __force_user *)&ru : NULL);
65494 set_fs(old_fs);
65495
65496 if ((ret < 0) || (info.si_signo == 0))
65497@@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
65498 oldfs = get_fs();
65499 set_fs(KERNEL_DS);
65500 err = sys_timer_settime(timer_id, flags,
65501- (struct itimerspec __user *) &newts,
65502- (struct itimerspec __user *) &oldts);
65503+ (struct itimerspec __force_user *) &newts,
65504+ (struct itimerspec __force_user *) &oldts);
65505 set_fs(oldfs);
65506 if (!err && old && put_compat_itimerspec(old, &oldts))
65507 return -EFAULT;
65508@@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
65509 oldfs = get_fs();
65510 set_fs(KERNEL_DS);
65511 err = sys_timer_gettime(timer_id,
65512- (struct itimerspec __user *) &ts);
65513+ (struct itimerspec __force_user *) &ts);
65514 set_fs(oldfs);
65515 if (!err && put_compat_itimerspec(setting, &ts))
65516 return -EFAULT;
65517@@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
65518 oldfs = get_fs();
65519 set_fs(KERNEL_DS);
65520 err = sys_clock_settime(which_clock,
65521- (struct timespec __user *) &ts);
65522+ (struct timespec __force_user *) &ts);
65523 set_fs(oldfs);
65524 return err;
65525 }
65526@@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
65527 oldfs = get_fs();
65528 set_fs(KERNEL_DS);
65529 err = sys_clock_gettime(which_clock,
65530- (struct timespec __user *) &ts);
65531+ (struct timespec __force_user *) &ts);
65532 set_fs(oldfs);
65533 if (!err && put_compat_timespec(&ts, tp))
65534 return -EFAULT;
65535@@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
65536
65537 oldfs = get_fs();
65538 set_fs(KERNEL_DS);
65539- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
65540+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
65541 set_fs(oldfs);
65542
65543 err = compat_put_timex(utp, &txc);
65544@@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
65545 oldfs = get_fs();
65546 set_fs(KERNEL_DS);
65547 err = sys_clock_getres(which_clock,
65548- (struct timespec __user *) &ts);
65549+ (struct timespec __force_user *) &ts);
65550 set_fs(oldfs);
65551 if (!err && tp && put_compat_timespec(&ts, tp))
65552 return -EFAULT;
65553@@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
65554 long err;
65555 mm_segment_t oldfs;
65556 struct timespec tu;
65557- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
65558+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
65559
65560- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
65561+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
65562 oldfs = get_fs();
65563 set_fs(KERNEL_DS);
65564 err = clock_nanosleep_restart(restart);
65565@@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
65566 oldfs = get_fs();
65567 set_fs(KERNEL_DS);
65568 err = sys_clock_nanosleep(which_clock, flags,
65569- (struct timespec __user *) &in,
65570- (struct timespec __user *) &out);
65571+ (struct timespec __force_user *) &in,
65572+ (struct timespec __force_user *) &out);
65573 set_fs(oldfs);
65574
65575 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
65576diff --git a/kernel/configs.c b/kernel/configs.c
65577index 42e8fa0..9e7406b 100644
65578--- a/kernel/configs.c
65579+++ b/kernel/configs.c
65580@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
65581 struct proc_dir_entry *entry;
65582
65583 /* create the current config file */
65584+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
65585+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
65586+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
65587+ &ikconfig_file_ops);
65588+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65589+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
65590+ &ikconfig_file_ops);
65591+#endif
65592+#else
65593 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
65594 &ikconfig_file_ops);
65595+#endif
65596+
65597 if (!entry)
65598 return -ENOMEM;
65599
65600diff --git a/kernel/cred.c b/kernel/cred.c
65601index 5791612..a3c04dc 100644
65602--- a/kernel/cred.c
65603+++ b/kernel/cred.c
65604@@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
65605 validate_creds(cred);
65606 put_cred(cred);
65607 }
65608+
65609+#ifdef CONFIG_GRKERNSEC_SETXID
65610+ cred = (struct cred *) tsk->delayed_cred;
65611+ if (cred) {
65612+ tsk->delayed_cred = NULL;
65613+ validate_creds(cred);
65614+ put_cred(cred);
65615+ }
65616+#endif
65617 }
65618
65619 /**
65620@@ -470,7 +479,7 @@ error_put:
65621 * Always returns 0 thus allowing this function to be tail-called at the end
65622 * of, say, sys_setgid().
65623 */
65624-int commit_creds(struct cred *new)
65625+static int __commit_creds(struct cred *new)
65626 {
65627 struct task_struct *task = current;
65628 const struct cred *old = task->real_cred;
65629@@ -489,6 +498,8 @@ int commit_creds(struct cred *new)
65630
65631 get_cred(new); /* we will require a ref for the subj creds too */
65632
65633+ gr_set_role_label(task, new->uid, new->gid);
65634+
65635 /* dumpability changes */
65636 if (old->euid != new->euid ||
65637 old->egid != new->egid ||
65638@@ -538,6 +549,92 @@ int commit_creds(struct cred *new)
65639 put_cred(old);
65640 return 0;
65641 }
65642+#ifdef CONFIG_GRKERNSEC_SETXID
65643+extern int set_user(struct cred *new);
65644+
65645+void gr_delayed_cred_worker(void)
65646+{
65647+ const struct cred *new = current->delayed_cred;
65648+ struct cred *ncred;
65649+
65650+ current->delayed_cred = NULL;
65651+
65652+ if (current_uid() && new != NULL) {
65653+ // from doing get_cred on it when queueing this
65654+ put_cred(new);
65655+ return;
65656+ } else if (new == NULL)
65657+ return;
65658+
65659+ ncred = prepare_creds();
65660+ if (!ncred)
65661+ goto die;
65662+ // uids
65663+ ncred->uid = new->uid;
65664+ ncred->euid = new->euid;
65665+ ncred->suid = new->suid;
65666+ ncred->fsuid = new->fsuid;
65667+ // gids
65668+ ncred->gid = new->gid;
65669+ ncred->egid = new->egid;
65670+ ncred->sgid = new->sgid;
65671+ ncred->fsgid = new->fsgid;
65672+ // groups
65673+ if (set_groups(ncred, new->group_info) < 0) {
65674+ abort_creds(ncred);
65675+ goto die;
65676+ }
65677+ // caps
65678+ ncred->securebits = new->securebits;
65679+ ncred->cap_inheritable = new->cap_inheritable;
65680+ ncred->cap_permitted = new->cap_permitted;
65681+ ncred->cap_effective = new->cap_effective;
65682+ ncred->cap_bset = new->cap_bset;
65683+
65684+ if (set_user(ncred)) {
65685+ abort_creds(ncred);
65686+ goto die;
65687+ }
65688+
65689+ // from doing get_cred on it when queueing this
65690+ put_cred(new);
65691+
65692+ __commit_creds(ncred);
65693+ return;
65694+die:
65695+ // from doing get_cred on it when queueing this
65696+ put_cred(new);
65697+ do_group_exit(SIGKILL);
65698+}
65699+#endif
65700+
65701+int commit_creds(struct cred *new)
65702+{
65703+#ifdef CONFIG_GRKERNSEC_SETXID
65704+ struct task_struct *t;
65705+
65706+ /* we won't get called with tasklist_lock held for writing
65707+ and interrupts disabled as the cred struct in that case is
65708+ init_cred
65709+ */
65710+ if (grsec_enable_setxid && !current_is_single_threaded() &&
65711+ !current_uid() && new->uid) {
65712+ rcu_read_lock();
65713+ read_lock(&tasklist_lock);
65714+ for (t = next_thread(current); t != current;
65715+ t = next_thread(t)) {
65716+ if (t->delayed_cred == NULL) {
65717+ t->delayed_cred = get_cred(new);
65718+ set_tsk_need_resched(t);
65719+ }
65720+ }
65721+ read_unlock(&tasklist_lock);
65722+ rcu_read_unlock();
65723+ }
65724+#endif
65725+ return __commit_creds(new);
65726+}
65727+
65728 EXPORT_SYMBOL(commit_creds);
65729
65730 /**
65731diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
65732index 0d7c087..01b8cef 100644
65733--- a/kernel/debug/debug_core.c
65734+++ b/kernel/debug/debug_core.c
65735@@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
65736 */
65737 static atomic_t masters_in_kgdb;
65738 static atomic_t slaves_in_kgdb;
65739-static atomic_t kgdb_break_tasklet_var;
65740+static atomic_unchecked_t kgdb_break_tasklet_var;
65741 atomic_t kgdb_setting_breakpoint;
65742
65743 struct task_struct *kgdb_usethread;
65744@@ -129,7 +129,7 @@ int kgdb_single_step;
65745 static pid_t kgdb_sstep_pid;
65746
65747 /* to keep track of the CPU which is doing the single stepping*/
65748-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65749+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65750
65751 /*
65752 * If you are debugging a problem where roundup (the collection of
65753@@ -542,7 +542,7 @@ return_normal:
65754 * kernel will only try for the value of sstep_tries before
65755 * giving up and continuing on.
65756 */
65757- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
65758+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
65759 (kgdb_info[cpu].task &&
65760 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
65761 atomic_set(&kgdb_active, -1);
65762@@ -636,8 +636,8 @@ cpu_master_loop:
65763 }
65764
65765 kgdb_restore:
65766- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
65767- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
65768+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
65769+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
65770 if (kgdb_info[sstep_cpu].task)
65771 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
65772 else
65773@@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(void)
65774 static void kgdb_tasklet_bpt(unsigned long ing)
65775 {
65776 kgdb_breakpoint();
65777- atomic_set(&kgdb_break_tasklet_var, 0);
65778+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
65779 }
65780
65781 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
65782
65783 void kgdb_schedule_breakpoint(void)
65784 {
65785- if (atomic_read(&kgdb_break_tasklet_var) ||
65786+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65787 atomic_read(&kgdb_active) != -1 ||
65788 atomic_read(&kgdb_setting_breakpoint))
65789 return;
65790- atomic_inc(&kgdb_break_tasklet_var);
65791+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
65792 tasklet_schedule(&kgdb_tasklet_breakpoint);
65793 }
65794 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
65795diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
65796index e2ae734..08a4c5c 100644
65797--- a/kernel/debug/kdb/kdb_main.c
65798+++ b/kernel/debug/kdb/kdb_main.c
65799@@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
65800 list_for_each_entry(mod, kdb_modules, list) {
65801
65802 kdb_printf("%-20s%8u 0x%p ", mod->name,
65803- mod->core_size, (void *)mod);
65804+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
65805 #ifdef CONFIG_MODULE_UNLOAD
65806 kdb_printf("%4ld ", module_refcount(mod));
65807 #endif
65808@@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
65809 kdb_printf(" (Loading)");
65810 else
65811 kdb_printf(" (Live)");
65812- kdb_printf(" 0x%p", mod->module_core);
65813+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65814
65815 #ifdef CONFIG_MODULE_UNLOAD
65816 {
65817diff --git a/kernel/events/core.c b/kernel/events/core.c
65818index 1b5c081..c375f83 100644
65819--- a/kernel/events/core.c
65820+++ b/kernel/events/core.c
65821@@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
65822 return 0;
65823 }
65824
65825-static atomic64_t perf_event_id;
65826+static atomic64_unchecked_t perf_event_id;
65827
65828 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65829 enum event_type_t event_type);
65830@@ -2581,7 +2581,7 @@ static void __perf_event_read(void *info)
65831
65832 static inline u64 perf_event_count(struct perf_event *event)
65833 {
65834- return local64_read(&event->count) + atomic64_read(&event->child_count);
65835+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65836 }
65837
65838 static u64 perf_event_read(struct perf_event *event)
65839@@ -2897,9 +2897,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
65840 mutex_lock(&event->child_mutex);
65841 total += perf_event_read(event);
65842 *enabled += event->total_time_enabled +
65843- atomic64_read(&event->child_total_time_enabled);
65844+ atomic64_read_unchecked(&event->child_total_time_enabled);
65845 *running += event->total_time_running +
65846- atomic64_read(&event->child_total_time_running);
65847+ atomic64_read_unchecked(&event->child_total_time_running);
65848
65849 list_for_each_entry(child, &event->child_list, child_list) {
65850 total += perf_event_read(child);
65851@@ -3306,10 +3306,10 @@ void perf_event_update_userpage(struct perf_event *event)
65852 userpg->offset -= local64_read(&event->hw.prev_count);
65853
65854 userpg->time_enabled = enabled +
65855- atomic64_read(&event->child_total_time_enabled);
65856+ atomic64_read_unchecked(&event->child_total_time_enabled);
65857
65858 userpg->time_running = running +
65859- atomic64_read(&event->child_total_time_running);
65860+ atomic64_read_unchecked(&event->child_total_time_running);
65861
65862 barrier();
65863 ++userpg->lock;
65864@@ -3738,11 +3738,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65865 values[n++] = perf_event_count(event);
65866 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65867 values[n++] = enabled +
65868- atomic64_read(&event->child_total_time_enabled);
65869+ atomic64_read_unchecked(&event->child_total_time_enabled);
65870 }
65871 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65872 values[n++] = running +
65873- atomic64_read(&event->child_total_time_running);
65874+ atomic64_read_unchecked(&event->child_total_time_running);
65875 }
65876 if (read_format & PERF_FORMAT_ID)
65877 values[n++] = primary_event_id(event);
65878@@ -4393,12 +4393,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65879 * need to add enough zero bytes after the string to handle
65880 * the 64bit alignment we do later.
65881 */
65882- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65883+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
65884 if (!buf) {
65885 name = strncpy(tmp, "//enomem", sizeof(tmp));
65886 goto got_name;
65887 }
65888- name = d_path(&file->f_path, buf, PATH_MAX);
65889+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65890 if (IS_ERR(name)) {
65891 name = strncpy(tmp, "//toolong", sizeof(tmp));
65892 goto got_name;
65893@@ -5765,7 +5765,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65894 event->parent = parent_event;
65895
65896 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65897- event->id = atomic64_inc_return(&perf_event_id);
65898+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
65899
65900 event->state = PERF_EVENT_STATE_INACTIVE;
65901
65902@@ -6287,10 +6287,10 @@ static void sync_child_event(struct perf_event *child_event,
65903 /*
65904 * Add back the child's count to the parent's count:
65905 */
65906- atomic64_add(child_val, &parent_event->child_count);
65907- atomic64_add(child_event->total_time_enabled,
65908+ atomic64_add_unchecked(child_val, &parent_event->child_count);
65909+ atomic64_add_unchecked(child_event->total_time_enabled,
65910 &parent_event->child_total_time_enabled);
65911- atomic64_add(child_event->total_time_running,
65912+ atomic64_add_unchecked(child_event->total_time_running,
65913 &parent_event->child_total_time_running);
65914
65915 /*
65916diff --git a/kernel/exit.c b/kernel/exit.c
65917index 4b4042f..5bdd8d5 100644
65918--- a/kernel/exit.c
65919+++ b/kernel/exit.c
65920@@ -58,6 +58,10 @@
65921 #include <asm/pgtable.h>
65922 #include <asm/mmu_context.h>
65923
65924+#ifdef CONFIG_GRKERNSEC
65925+extern rwlock_t grsec_exec_file_lock;
65926+#endif
65927+
65928 static void exit_mm(struct task_struct * tsk);
65929
65930 static void __unhash_process(struct task_struct *p, bool group_dead)
65931@@ -169,6 +173,10 @@ void release_task(struct task_struct * p)
65932 struct task_struct *leader;
65933 int zap_leader;
65934 repeat:
65935+#ifdef CONFIG_NET
65936+ gr_del_task_from_ip_table(p);
65937+#endif
65938+
65939 /* don't need to get the RCU readlock here - the process is dead and
65940 * can't be modifying its own credentials. But shut RCU-lockdep up */
65941 rcu_read_lock();
65942@@ -381,7 +389,7 @@ int allow_signal(int sig)
65943 * know it'll be handled, so that they don't get converted to
65944 * SIGKILL or just silently dropped.
65945 */
65946- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65947+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65948 recalc_sigpending();
65949 spin_unlock_irq(&current->sighand->siglock);
65950 return 0;
65951@@ -417,6 +425,17 @@ void daemonize(const char *name, ...)
65952 vsnprintf(current->comm, sizeof(current->comm), name, args);
65953 va_end(args);
65954
65955+#ifdef CONFIG_GRKERNSEC
65956+ write_lock(&grsec_exec_file_lock);
65957+ if (current->exec_file) {
65958+ fput(current->exec_file);
65959+ current->exec_file = NULL;
65960+ }
65961+ write_unlock(&grsec_exec_file_lock);
65962+#endif
65963+
65964+ gr_set_kernel_label(current);
65965+
65966 /*
65967 * If we were started as result of loading a module, close all of the
65968 * user space pages. We don't need them, and if we didn't close them
65969@@ -892,6 +911,8 @@ void do_exit(long code)
65970 struct task_struct *tsk = current;
65971 int group_dead;
65972
65973+ set_fs(USER_DS);
65974+
65975 profile_task_exit(tsk);
65976
65977 WARN_ON(blk_needs_flush_plug(tsk));
65978@@ -908,7 +929,6 @@ void do_exit(long code)
65979 * mm_release()->clear_child_tid() from writing to a user-controlled
65980 * kernel address.
65981 */
65982- set_fs(USER_DS);
65983
65984 ptrace_event(PTRACE_EVENT_EXIT, code);
65985
65986@@ -969,6 +989,9 @@ void do_exit(long code)
65987 tsk->exit_code = code;
65988 taskstats_exit(tsk, group_dead);
65989
65990+ gr_acl_handle_psacct(tsk, code);
65991+ gr_acl_handle_exit();
65992+
65993 exit_mm(tsk);
65994
65995 if (group_dead)
65996@@ -1085,7 +1108,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
65997 * Take down every thread in the group. This is called by fatal signals
65998 * as well as by sys_exit_group (below).
65999 */
66000-void
66001+__noreturn void
66002 do_group_exit(int exit_code)
66003 {
66004 struct signal_struct *sig = current->signal;
66005diff --git a/kernel/fork.c b/kernel/fork.c
66006index 26a7a67..a1053f9 100644
66007--- a/kernel/fork.c
66008+++ b/kernel/fork.c
66009@@ -284,7 +284,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
66010 *stackend = STACK_END_MAGIC; /* for overflow detection */
66011
66012 #ifdef CONFIG_CC_STACKPROTECTOR
66013- tsk->stack_canary = get_random_int();
66014+ tsk->stack_canary = pax_get_random_long();
66015 #endif
66016
66017 /*
66018@@ -308,13 +308,77 @@ out:
66019 }
66020
66021 #ifdef CONFIG_MMU
66022+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
66023+{
66024+ struct vm_area_struct *tmp;
66025+ unsigned long charge;
66026+ struct mempolicy *pol;
66027+ struct file *file;
66028+
66029+ charge = 0;
66030+ if (mpnt->vm_flags & VM_ACCOUNT) {
66031+ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66032+ if (security_vm_enough_memory(len))
66033+ goto fail_nomem;
66034+ charge = len;
66035+ }
66036+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66037+ if (!tmp)
66038+ goto fail_nomem;
66039+ *tmp = *mpnt;
66040+ tmp->vm_mm = mm;
66041+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
66042+ pol = mpol_dup(vma_policy(mpnt));
66043+ if (IS_ERR(pol))
66044+ goto fail_nomem_policy;
66045+ vma_set_policy(tmp, pol);
66046+ if (anon_vma_fork(tmp, mpnt))
66047+ goto fail_nomem_anon_vma_fork;
66048+ tmp->vm_flags &= ~VM_LOCKED;
66049+ tmp->vm_next = tmp->vm_prev = NULL;
66050+ tmp->vm_mirror = NULL;
66051+ file = tmp->vm_file;
66052+ if (file) {
66053+ struct inode *inode = file->f_path.dentry->d_inode;
66054+ struct address_space *mapping = file->f_mapping;
66055+
66056+ get_file(file);
66057+ if (tmp->vm_flags & VM_DENYWRITE)
66058+ atomic_dec(&inode->i_writecount);
66059+ mutex_lock(&mapping->i_mmap_mutex);
66060+ if (tmp->vm_flags & VM_SHARED)
66061+ mapping->i_mmap_writable++;
66062+ flush_dcache_mmap_lock(mapping);
66063+ /* insert tmp into the share list, just after mpnt */
66064+ vma_prio_tree_add(tmp, mpnt);
66065+ flush_dcache_mmap_unlock(mapping);
66066+ mutex_unlock(&mapping->i_mmap_mutex);
66067+ }
66068+
66069+ /*
66070+ * Clear hugetlb-related page reserves for children. This only
66071+ * affects MAP_PRIVATE mappings. Faults generated by the child
66072+ * are not guaranteed to succeed, even if read-only
66073+ */
66074+ if (is_vm_hugetlb_page(tmp))
66075+ reset_vma_resv_huge_pages(tmp);
66076+
66077+ return tmp;
66078+
66079+fail_nomem_anon_vma_fork:
66080+ mpol_put(pol);
66081+fail_nomem_policy:
66082+ kmem_cache_free(vm_area_cachep, tmp);
66083+fail_nomem:
66084+ vm_unacct_memory(charge);
66085+ return NULL;
66086+}
66087+
66088 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66089 {
66090 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
66091 struct rb_node **rb_link, *rb_parent;
66092 int retval;
66093- unsigned long charge;
66094- struct mempolicy *pol;
66095
66096 down_write(&oldmm->mmap_sem);
66097 flush_cache_dup_mm(oldmm);
66098@@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66099 mm->locked_vm = 0;
66100 mm->mmap = NULL;
66101 mm->mmap_cache = NULL;
66102- mm->free_area_cache = oldmm->mmap_base;
66103- mm->cached_hole_size = ~0UL;
66104+ mm->free_area_cache = oldmm->free_area_cache;
66105+ mm->cached_hole_size = oldmm->cached_hole_size;
66106 mm->map_count = 0;
66107 cpumask_clear(mm_cpumask(mm));
66108 mm->mm_rb = RB_ROOT;
66109@@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66110
66111 prev = NULL;
66112 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
66113- struct file *file;
66114-
66115 if (mpnt->vm_flags & VM_DONTCOPY) {
66116 long pages = vma_pages(mpnt);
66117 mm->total_vm -= pages;
66118@@ -352,53 +414,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66119 -pages);
66120 continue;
66121 }
66122- charge = 0;
66123- if (mpnt->vm_flags & VM_ACCOUNT) {
66124- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66125- if (security_vm_enough_memory(len))
66126- goto fail_nomem;
66127- charge = len;
66128+ tmp = dup_vma(mm, mpnt);
66129+ if (!tmp) {
66130+ retval = -ENOMEM;
66131+ goto out;
66132 }
66133- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66134- if (!tmp)
66135- goto fail_nomem;
66136- *tmp = *mpnt;
66137- INIT_LIST_HEAD(&tmp->anon_vma_chain);
66138- pol = mpol_dup(vma_policy(mpnt));
66139- retval = PTR_ERR(pol);
66140- if (IS_ERR(pol))
66141- goto fail_nomem_policy;
66142- vma_set_policy(tmp, pol);
66143- tmp->vm_mm = mm;
66144- if (anon_vma_fork(tmp, mpnt))
66145- goto fail_nomem_anon_vma_fork;
66146- tmp->vm_flags &= ~VM_LOCKED;
66147- tmp->vm_next = tmp->vm_prev = NULL;
66148- file = tmp->vm_file;
66149- if (file) {
66150- struct inode *inode = file->f_path.dentry->d_inode;
66151- struct address_space *mapping = file->f_mapping;
66152-
66153- get_file(file);
66154- if (tmp->vm_flags & VM_DENYWRITE)
66155- atomic_dec(&inode->i_writecount);
66156- mutex_lock(&mapping->i_mmap_mutex);
66157- if (tmp->vm_flags & VM_SHARED)
66158- mapping->i_mmap_writable++;
66159- flush_dcache_mmap_lock(mapping);
66160- /* insert tmp into the share list, just after mpnt */
66161- vma_prio_tree_add(tmp, mpnt);
66162- flush_dcache_mmap_unlock(mapping);
66163- mutex_unlock(&mapping->i_mmap_mutex);
66164- }
66165-
66166- /*
66167- * Clear hugetlb-related page reserves for children. This only
66168- * affects MAP_PRIVATE mappings. Faults generated by the child
66169- * are not guaranteed to succeed, even if read-only
66170- */
66171- if (is_vm_hugetlb_page(tmp))
66172- reset_vma_resv_huge_pages(tmp);
66173
66174 /*
66175 * Link in the new vma and copy the page table entries.
66176@@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66177 if (retval)
66178 goto out;
66179 }
66180+
66181+#ifdef CONFIG_PAX_SEGMEXEC
66182+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
66183+ struct vm_area_struct *mpnt_m;
66184+
66185+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
66186+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
66187+
66188+ if (!mpnt->vm_mirror)
66189+ continue;
66190+
66191+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
66192+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
66193+ mpnt->vm_mirror = mpnt_m;
66194+ } else {
66195+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
66196+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
66197+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
66198+ mpnt->vm_mirror->vm_mirror = mpnt;
66199+ }
66200+ }
66201+ BUG_ON(mpnt_m);
66202+ }
66203+#endif
66204+
66205 /* a new mm has just been created */
66206 arch_dup_mmap(oldmm, mm);
66207 retval = 0;
66208@@ -429,14 +474,6 @@ out:
66209 flush_tlb_mm(oldmm);
66210 up_write(&oldmm->mmap_sem);
66211 return retval;
66212-fail_nomem_anon_vma_fork:
66213- mpol_put(pol);
66214-fail_nomem_policy:
66215- kmem_cache_free(vm_area_cachep, tmp);
66216-fail_nomem:
66217- retval = -ENOMEM;
66218- vm_unacct_memory(charge);
66219- goto out;
66220 }
66221
66222 static inline int mm_alloc_pgd(struct mm_struct *mm)
66223@@ -658,8 +695,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
66224 return ERR_PTR(err);
66225
66226 mm = get_task_mm(task);
66227- if (mm && mm != current->mm &&
66228- !ptrace_may_access(task, mode)) {
66229+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
66230+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
66231 mmput(mm);
66232 mm = ERR_PTR(-EACCES);
66233 }
66234@@ -881,13 +918,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
66235 spin_unlock(&fs->lock);
66236 return -EAGAIN;
66237 }
66238- fs->users++;
66239+ atomic_inc(&fs->users);
66240 spin_unlock(&fs->lock);
66241 return 0;
66242 }
66243 tsk->fs = copy_fs_struct(fs);
66244 if (!tsk->fs)
66245 return -ENOMEM;
66246+ gr_set_chroot_entries(tsk, &tsk->fs->root);
66247 return 0;
66248 }
66249
66250@@ -1151,6 +1189,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66251 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
66252 #endif
66253 retval = -EAGAIN;
66254+
66255+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
66256+
66257 if (atomic_read(&p->real_cred->user->processes) >=
66258 task_rlimit(p, RLIMIT_NPROC)) {
66259 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
66260@@ -1306,6 +1347,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66261 if (clone_flags & CLONE_THREAD)
66262 p->tgid = current->tgid;
66263
66264+ gr_copy_label(p);
66265+
66266 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
66267 /*
66268 * Clear TID on mm_release()?
66269@@ -1472,6 +1515,8 @@ bad_fork_cleanup_count:
66270 bad_fork_free:
66271 free_task(p);
66272 fork_out:
66273+ gr_log_forkfail(retval);
66274+
66275 return ERR_PTR(retval);
66276 }
66277
66278@@ -1572,6 +1617,8 @@ long do_fork(unsigned long clone_flags,
66279 if (clone_flags & CLONE_PARENT_SETTID)
66280 put_user(nr, parent_tidptr);
66281
66282+ gr_handle_brute_check();
66283+
66284 if (clone_flags & CLONE_VFORK) {
66285 p->vfork_done = &vfork;
66286 init_completion(&vfork);
66287@@ -1670,7 +1717,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
66288 return 0;
66289
66290 /* don't need lock here; in the worst case we'll do useless copy */
66291- if (fs->users == 1)
66292+ if (atomic_read(&fs->users) == 1)
66293 return 0;
66294
66295 *new_fsp = copy_fs_struct(fs);
66296@@ -1759,7 +1806,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
66297 fs = current->fs;
66298 spin_lock(&fs->lock);
66299 current->fs = new_fs;
66300- if (--fs->users)
66301+ gr_set_chroot_entries(current, &current->fs->root);
66302+ if (atomic_dec_return(&fs->users))
66303 new_fs = NULL;
66304 else
66305 new_fs = fs;
66306diff --git a/kernel/futex.c b/kernel/futex.c
66307index 0677023..f3c3b79 100644
66308--- a/kernel/futex.c
66309+++ b/kernel/futex.c
66310@@ -54,6 +54,7 @@
66311 #include <linux/mount.h>
66312 #include <linux/pagemap.h>
66313 #include <linux/syscalls.h>
66314+#include <linux/ptrace.h>
66315 #include <linux/signal.h>
66316 #include <linux/export.h>
66317 #include <linux/magic.h>
66318@@ -238,6 +239,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
66319 struct page *page, *page_head;
66320 int err, ro = 0;
66321
66322+#ifdef CONFIG_PAX_SEGMEXEC
66323+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
66324+ return -EFAULT;
66325+#endif
66326+
66327 /*
66328 * The futex address must be "naturally" aligned.
66329 */
66330@@ -2459,6 +2465,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
66331 if (!p)
66332 goto err_unlock;
66333 ret = -EPERM;
66334+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66335+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
66336+ goto err_unlock;
66337+#endif
66338 pcred = __task_cred(p);
66339 /* If victim is in different user_ns, then uids are not
66340 comparable, so we must have CAP_SYS_PTRACE */
66341@@ -2731,6 +2741,7 @@ static int __init futex_init(void)
66342 {
66343 u32 curval;
66344 int i;
66345+ mm_segment_t oldfs;
66346
66347 /*
66348 * This will fail and we want it. Some arch implementations do
66349@@ -2742,8 +2753,11 @@ static int __init futex_init(void)
66350 * implementation, the non-functional ones will return
66351 * -ENOSYS.
66352 */
66353+ oldfs = get_fs();
66354+ set_fs(USER_DS);
66355 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
66356 futex_cmpxchg_enabled = 1;
66357+ set_fs(oldfs);
66358
66359 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
66360 plist_head_init(&futex_queues[i].chain);
66361diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
66362index 5f9e689..582d46d 100644
66363--- a/kernel/futex_compat.c
66364+++ b/kernel/futex_compat.c
66365@@ -10,6 +10,7 @@
66366 #include <linux/compat.h>
66367 #include <linux/nsproxy.h>
66368 #include <linux/futex.h>
66369+#include <linux/ptrace.h>
66370
66371 #include <asm/uaccess.h>
66372
66373@@ -136,7 +137,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
66374 {
66375 struct compat_robust_list_head __user *head;
66376 unsigned long ret;
66377- const struct cred *cred = current_cred(), *pcred;
66378+ const struct cred *cred = current_cred();
66379+ const struct cred *pcred;
66380
66381 if (!futex_cmpxchg_enabled)
66382 return -ENOSYS;
66383@@ -152,6 +154,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
66384 if (!p)
66385 goto err_unlock;
66386 ret = -EPERM;
66387+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66388+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
66389+ goto err_unlock;
66390+#endif
66391 pcred = __task_cred(p);
66392 /* If victim is in different user_ns, then uids are not
66393 comparable, so we must have CAP_SYS_PTRACE */
66394diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
66395index 9b22d03..6295b62 100644
66396--- a/kernel/gcov/base.c
66397+++ b/kernel/gcov/base.c
66398@@ -102,11 +102,6 @@ void gcov_enable_events(void)
66399 }
66400
66401 #ifdef CONFIG_MODULES
66402-static inline int within(void *addr, void *start, unsigned long size)
66403-{
66404- return ((addr >= start) && (addr < start + size));
66405-}
66406-
66407 /* Update list and generate events when modules are unloaded. */
66408 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66409 void *data)
66410@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66411 prev = NULL;
66412 /* Remove entries located in module from linked list. */
66413 for (info = gcov_info_head; info; info = info->next) {
66414- if (within(info, mod->module_core, mod->core_size)) {
66415+ if (within_module_core_rw((unsigned long)info, mod)) {
66416 if (prev)
66417 prev->next = info->next;
66418 else
66419diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
66420index ae34bf5..4e2f3d0 100644
66421--- a/kernel/hrtimer.c
66422+++ b/kernel/hrtimer.c
66423@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
66424 local_irq_restore(flags);
66425 }
66426
66427-static void run_hrtimer_softirq(struct softirq_action *h)
66428+static void run_hrtimer_softirq(void)
66429 {
66430 hrtimer_peek_ahead_timers();
66431 }
66432diff --git a/kernel/jump_label.c b/kernel/jump_label.c
66433index 01d3b70..9e4d098 100644
66434--- a/kernel/jump_label.c
66435+++ b/kernel/jump_label.c
66436@@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
66437
66438 size = (((unsigned long)stop - (unsigned long)start)
66439 / sizeof(struct jump_entry));
66440+ pax_open_kernel();
66441 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
66442+ pax_close_kernel();
66443 }
66444
66445 static void jump_label_update(struct jump_label_key *key, int enable);
66446@@ -340,10 +342,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
66447 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
66448 struct jump_entry *iter;
66449
66450+ pax_open_kernel();
66451 for (iter = iter_start; iter < iter_stop; iter++) {
66452 if (within_module_init(iter->code, mod))
66453 iter->code = 0;
66454 }
66455+ pax_close_kernel();
66456 }
66457
66458 static int
66459diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
66460index 079f1d3..a407562 100644
66461--- a/kernel/kallsyms.c
66462+++ b/kernel/kallsyms.c
66463@@ -11,6 +11,9 @@
66464 * Changed the compression method from stem compression to "table lookup"
66465 * compression (see scripts/kallsyms.c for a more complete description)
66466 */
66467+#ifdef CONFIG_GRKERNSEC_HIDESYM
66468+#define __INCLUDED_BY_HIDESYM 1
66469+#endif
66470 #include <linux/kallsyms.h>
66471 #include <linux/module.h>
66472 #include <linux/init.h>
66473@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
66474
66475 static inline int is_kernel_inittext(unsigned long addr)
66476 {
66477+ if (system_state != SYSTEM_BOOTING)
66478+ return 0;
66479+
66480 if (addr >= (unsigned long)_sinittext
66481 && addr <= (unsigned long)_einittext)
66482 return 1;
66483 return 0;
66484 }
66485
66486+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66487+#ifdef CONFIG_MODULES
66488+static inline int is_module_text(unsigned long addr)
66489+{
66490+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
66491+ return 1;
66492+
66493+ addr = ktla_ktva(addr);
66494+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
66495+}
66496+#else
66497+static inline int is_module_text(unsigned long addr)
66498+{
66499+ return 0;
66500+}
66501+#endif
66502+#endif
66503+
66504 static inline int is_kernel_text(unsigned long addr)
66505 {
66506 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
66507@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
66508
66509 static inline int is_kernel(unsigned long addr)
66510 {
66511+
66512+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66513+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
66514+ return 1;
66515+
66516+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
66517+#else
66518 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
66519+#endif
66520+
66521 return 1;
66522 return in_gate_area_no_mm(addr);
66523 }
66524
66525 static int is_ksym_addr(unsigned long addr)
66526 {
66527+
66528+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66529+ if (is_module_text(addr))
66530+ return 0;
66531+#endif
66532+
66533 if (all_var)
66534 return is_kernel(addr);
66535
66536@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
66537
66538 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
66539 {
66540- iter->name[0] = '\0';
66541 iter->nameoff = get_symbol_offset(new_pos);
66542 iter->pos = new_pos;
66543 }
66544@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
66545 {
66546 struct kallsym_iter *iter = m->private;
66547
66548+#ifdef CONFIG_GRKERNSEC_HIDESYM
66549+ if (current_uid())
66550+ return 0;
66551+#endif
66552+
66553 /* Some debugging symbols have no name. Ignore them. */
66554 if (!iter->name[0])
66555 return 0;
66556@@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
66557 struct kallsym_iter *iter;
66558 int ret;
66559
66560- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
66561+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
66562 if (!iter)
66563 return -ENOMEM;
66564 reset_iter(iter, 0);
66565diff --git a/kernel/kexec.c b/kernel/kexec.c
66566index 7b08867..3bac516 100644
66567--- a/kernel/kexec.c
66568+++ b/kernel/kexec.c
66569@@ -1047,7 +1047,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
66570 unsigned long flags)
66571 {
66572 struct compat_kexec_segment in;
66573- struct kexec_segment out, __user *ksegments;
66574+ struct kexec_segment out;
66575+ struct kexec_segment __user *ksegments;
66576 unsigned long i, result;
66577
66578 /* Don't allow clients that don't understand the native
66579diff --git a/kernel/kmod.c b/kernel/kmod.c
66580index a0a8854..642b106 100644
66581--- a/kernel/kmod.c
66582+++ b/kernel/kmod.c
66583@@ -75,13 +75,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
66584 * If module auto-loading support is disabled then this function
66585 * becomes a no-operation.
66586 */
66587-int __request_module(bool wait, const char *fmt, ...)
66588+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
66589 {
66590- va_list args;
66591 char module_name[MODULE_NAME_LEN];
66592 unsigned int max_modprobes;
66593 int ret;
66594- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
66595+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
66596 static char *envp[] = { "HOME=/",
66597 "TERM=linux",
66598 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
66599@@ -90,9 +89,7 @@ int __request_module(bool wait, const char *fmt, ...)
66600 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
66601 static int kmod_loop_msg;
66602
66603- va_start(args, fmt);
66604- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
66605- va_end(args);
66606+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
66607 if (ret >= MODULE_NAME_LEN)
66608 return -ENAMETOOLONG;
66609
66610@@ -100,6 +97,20 @@ int __request_module(bool wait, const char *fmt, ...)
66611 if (ret)
66612 return ret;
66613
66614+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66615+ if (!current_uid()) {
66616+ /* hack to workaround consolekit/udisks stupidity */
66617+ read_lock(&tasklist_lock);
66618+ if (!strcmp(current->comm, "mount") &&
66619+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
66620+ read_unlock(&tasklist_lock);
66621+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
66622+ return -EPERM;
66623+ }
66624+ read_unlock(&tasklist_lock);
66625+ }
66626+#endif
66627+
66628 /* If modprobe needs a service that is in a module, we get a recursive
66629 * loop. Limit the number of running kmod threads to max_threads/2 or
66630 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
66631@@ -135,6 +146,47 @@ int __request_module(bool wait, const char *fmt, ...)
66632 atomic_dec(&kmod_concurrent);
66633 return ret;
66634 }
66635+
66636+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
66637+{
66638+ va_list args;
66639+ int ret;
66640+
66641+ va_start(args, fmt);
66642+ ret = ____request_module(wait, module_param, fmt, args);
66643+ va_end(args);
66644+
66645+ return ret;
66646+}
66647+
66648+int __request_module(bool wait, const char *fmt, ...)
66649+{
66650+ va_list args;
66651+ int ret;
66652+
66653+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66654+ if (current_uid()) {
66655+ char module_param[MODULE_NAME_LEN];
66656+
66657+ memset(module_param, 0, sizeof(module_param));
66658+
66659+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
66660+
66661+ va_start(args, fmt);
66662+ ret = ____request_module(wait, module_param, fmt, args);
66663+ va_end(args);
66664+
66665+ return ret;
66666+ }
66667+#endif
66668+
66669+ va_start(args, fmt);
66670+ ret = ____request_module(wait, NULL, fmt, args);
66671+ va_end(args);
66672+
66673+ return ret;
66674+}
66675+
66676 EXPORT_SYMBOL(__request_module);
66677 #endif /* CONFIG_MODULES */
66678
66679@@ -224,7 +276,7 @@ static int wait_for_helper(void *data)
66680 *
66681 * Thus the __user pointer cast is valid here.
66682 */
66683- sys_wait4(pid, (int __user *)&ret, 0, NULL);
66684+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
66685
66686 /*
66687 * If ret is 0, either ____call_usermodehelper failed and the
66688diff --git a/kernel/kprobes.c b/kernel/kprobes.c
66689index c62b854..cb67968 100644
66690--- a/kernel/kprobes.c
66691+++ b/kernel/kprobes.c
66692@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
66693 * kernel image and loaded module images reside. This is required
66694 * so x86_64 can correctly handle the %rip-relative fixups.
66695 */
66696- kip->insns = module_alloc(PAGE_SIZE);
66697+ kip->insns = module_alloc_exec(PAGE_SIZE);
66698 if (!kip->insns) {
66699 kfree(kip);
66700 return NULL;
66701@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
66702 */
66703 if (!list_is_singular(&kip->list)) {
66704 list_del(&kip->list);
66705- module_free(NULL, kip->insns);
66706+ module_free_exec(NULL, kip->insns);
66707 kfree(kip);
66708 }
66709 return 1;
66710@@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
66711 {
66712 int i, err = 0;
66713 unsigned long offset = 0, size = 0;
66714- char *modname, namebuf[128];
66715+ char *modname, namebuf[KSYM_NAME_LEN];
66716 const char *symbol_name;
66717 void *addr;
66718 struct kprobe_blackpoint *kb;
66719@@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
66720 const char *sym = NULL;
66721 unsigned int i = *(loff_t *) v;
66722 unsigned long offset = 0;
66723- char *modname, namebuf[128];
66724+ char *modname, namebuf[KSYM_NAME_LEN];
66725
66726 head = &kprobe_table[i];
66727 preempt_disable();
66728diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
66729index 4e316e1..5501eef 100644
66730--- a/kernel/ksysfs.c
66731+++ b/kernel/ksysfs.c
66732@@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
66733 {
66734 if (count+1 > UEVENT_HELPER_PATH_LEN)
66735 return -ENOENT;
66736+ if (!capable(CAP_SYS_ADMIN))
66737+ return -EPERM;
66738 memcpy(uevent_helper, buf, count);
66739 uevent_helper[count] = '\0';
66740 if (count && uevent_helper[count-1] == '\n')
66741diff --git a/kernel/lockdep.c b/kernel/lockdep.c
66742index 8889f7d..95319b7 100644
66743--- a/kernel/lockdep.c
66744+++ b/kernel/lockdep.c
66745@@ -590,6 +590,10 @@ static int static_obj(void *obj)
66746 end = (unsigned long) &_end,
66747 addr = (unsigned long) obj;
66748
66749+#ifdef CONFIG_PAX_KERNEXEC
66750+ start = ktla_ktva(start);
66751+#endif
66752+
66753 /*
66754 * static variable?
66755 */
66756@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
66757 if (!static_obj(lock->key)) {
66758 debug_locks_off();
66759 printk("INFO: trying to register non-static key.\n");
66760+ printk("lock:%pS key:%pS.\n", lock, lock->key);
66761 printk("the code is fine but needs lockdep annotation.\n");
66762 printk("turning off the locking correctness validator.\n");
66763 dump_stack();
66764@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
66765 if (!class)
66766 return 0;
66767 }
66768- atomic_inc((atomic_t *)&class->ops);
66769+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
66770 if (very_verbose(class)) {
66771 printk("\nacquire class [%p] %s", class->key, class->name);
66772 if (class->name_version > 1)
66773diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
66774index 91c32a0..b2c71c5 100644
66775--- a/kernel/lockdep_proc.c
66776+++ b/kernel/lockdep_proc.c
66777@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
66778
66779 static void print_name(struct seq_file *m, struct lock_class *class)
66780 {
66781- char str[128];
66782+ char str[KSYM_NAME_LEN];
66783 const char *name = class->name;
66784
66785 if (!name) {
66786diff --git a/kernel/module.c b/kernel/module.c
66787index 3d56b6f..2a22bd0 100644
66788--- a/kernel/module.c
66789+++ b/kernel/module.c
66790@@ -58,6 +58,7 @@
66791 #include <linux/jump_label.h>
66792 #include <linux/pfn.h>
66793 #include <linux/bsearch.h>
66794+#include <linux/grsecurity.h>
66795
66796 #define CREATE_TRACE_POINTS
66797 #include <trace/events/module.h>
66798@@ -113,7 +114,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
66799
66800 /* Bounds of module allocation, for speeding __module_address.
66801 * Protected by module_mutex. */
66802-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66803+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66804+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66805
66806 int register_module_notifier(struct notifier_block * nb)
66807 {
66808@@ -277,7 +279,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66809 return true;
66810
66811 list_for_each_entry_rcu(mod, &modules, list) {
66812- struct symsearch arr[] = {
66813+ struct symsearch modarr[] = {
66814 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66815 NOT_GPL_ONLY, false },
66816 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
66817@@ -299,7 +301,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66818 #endif
66819 };
66820
66821- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66822+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66823 return true;
66824 }
66825 return false;
66826@@ -431,7 +433,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
66827 static int percpu_modalloc(struct module *mod,
66828 unsigned long size, unsigned long align)
66829 {
66830- if (align > PAGE_SIZE) {
66831+ if (align-1 >= PAGE_SIZE) {
66832 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
66833 mod->name, align, PAGE_SIZE);
66834 align = PAGE_SIZE;
66835@@ -1001,7 +1003,7 @@ struct module_attribute module_uevent =
66836 static ssize_t show_coresize(struct module_attribute *mattr,
66837 struct module_kobject *mk, char *buffer)
66838 {
66839- return sprintf(buffer, "%u\n", mk->mod->core_size);
66840+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
66841 }
66842
66843 static struct module_attribute modinfo_coresize =
66844@@ -1010,7 +1012,7 @@ static struct module_attribute modinfo_coresize =
66845 static ssize_t show_initsize(struct module_attribute *mattr,
66846 struct module_kobject *mk, char *buffer)
66847 {
66848- return sprintf(buffer, "%u\n", mk->mod->init_size);
66849+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
66850 }
66851
66852 static struct module_attribute modinfo_initsize =
66853@@ -1224,7 +1226,7 @@ resolve_symbol_wait(struct module *mod,
66854 */
66855 #ifdef CONFIG_SYSFS
66856
66857-#ifdef CONFIG_KALLSYMS
66858+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66859 static inline bool sect_empty(const Elf_Shdr *sect)
66860 {
66861 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
66862@@ -1690,21 +1692,21 @@ static void set_section_ro_nx(void *base,
66863
66864 static void unset_module_core_ro_nx(struct module *mod)
66865 {
66866- set_page_attributes(mod->module_core + mod->core_text_size,
66867- mod->module_core + mod->core_size,
66868+ set_page_attributes(mod->module_core_rw,
66869+ mod->module_core_rw + mod->core_size_rw,
66870 set_memory_x);
66871- set_page_attributes(mod->module_core,
66872- mod->module_core + mod->core_ro_size,
66873+ set_page_attributes(mod->module_core_rx,
66874+ mod->module_core_rx + mod->core_size_rx,
66875 set_memory_rw);
66876 }
66877
66878 static void unset_module_init_ro_nx(struct module *mod)
66879 {
66880- set_page_attributes(mod->module_init + mod->init_text_size,
66881- mod->module_init + mod->init_size,
66882+ set_page_attributes(mod->module_init_rw,
66883+ mod->module_init_rw + mod->init_size_rw,
66884 set_memory_x);
66885- set_page_attributes(mod->module_init,
66886- mod->module_init + mod->init_ro_size,
66887+ set_page_attributes(mod->module_init_rx,
66888+ mod->module_init_rx + mod->init_size_rx,
66889 set_memory_rw);
66890 }
66891
66892@@ -1715,14 +1717,14 @@ void set_all_modules_text_rw(void)
66893
66894 mutex_lock(&module_mutex);
66895 list_for_each_entry_rcu(mod, &modules, list) {
66896- if ((mod->module_core) && (mod->core_text_size)) {
66897- set_page_attributes(mod->module_core,
66898- mod->module_core + mod->core_text_size,
66899+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
66900+ set_page_attributes(mod->module_core_rx,
66901+ mod->module_core_rx + mod->core_size_rx,
66902 set_memory_rw);
66903 }
66904- if ((mod->module_init) && (mod->init_text_size)) {
66905- set_page_attributes(mod->module_init,
66906- mod->module_init + mod->init_text_size,
66907+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
66908+ set_page_attributes(mod->module_init_rx,
66909+ mod->module_init_rx + mod->init_size_rx,
66910 set_memory_rw);
66911 }
66912 }
66913@@ -1736,14 +1738,14 @@ void set_all_modules_text_ro(void)
66914
66915 mutex_lock(&module_mutex);
66916 list_for_each_entry_rcu(mod, &modules, list) {
66917- if ((mod->module_core) && (mod->core_text_size)) {
66918- set_page_attributes(mod->module_core,
66919- mod->module_core + mod->core_text_size,
66920+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
66921+ set_page_attributes(mod->module_core_rx,
66922+ mod->module_core_rx + mod->core_size_rx,
66923 set_memory_ro);
66924 }
66925- if ((mod->module_init) && (mod->init_text_size)) {
66926- set_page_attributes(mod->module_init,
66927- mod->module_init + mod->init_text_size,
66928+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
66929+ set_page_attributes(mod->module_init_rx,
66930+ mod->module_init_rx + mod->init_size_rx,
66931 set_memory_ro);
66932 }
66933 }
66934@@ -1789,16 +1791,19 @@ static void free_module(struct module *mod)
66935
66936 /* This may be NULL, but that's OK */
66937 unset_module_init_ro_nx(mod);
66938- module_free(mod, mod->module_init);
66939+ module_free(mod, mod->module_init_rw);
66940+ module_free_exec(mod, mod->module_init_rx);
66941 kfree(mod->args);
66942 percpu_modfree(mod);
66943
66944 /* Free lock-classes: */
66945- lockdep_free_key_range(mod->module_core, mod->core_size);
66946+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66947+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66948
66949 /* Finally, free the core (containing the module structure) */
66950 unset_module_core_ro_nx(mod);
66951- module_free(mod, mod->module_core);
66952+ module_free_exec(mod, mod->module_core_rx);
66953+ module_free(mod, mod->module_core_rw);
66954
66955 #ifdef CONFIG_MPU
66956 update_protections(current->mm);
66957@@ -1867,10 +1872,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66958 unsigned int i;
66959 int ret = 0;
66960 const struct kernel_symbol *ksym;
66961+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66962+ int is_fs_load = 0;
66963+ int register_filesystem_found = 0;
66964+ char *p;
66965+
66966+ p = strstr(mod->args, "grsec_modharden_fs");
66967+ if (p) {
66968+ char *endptr = p + strlen("grsec_modharden_fs");
66969+ /* copy \0 as well */
66970+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
66971+ is_fs_load = 1;
66972+ }
66973+#endif
66974
66975 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
66976 const char *name = info->strtab + sym[i].st_name;
66977
66978+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66979+ /* it's a real shame this will never get ripped and copied
66980+ upstream! ;(
66981+ */
66982+ if (is_fs_load && !strcmp(name, "register_filesystem"))
66983+ register_filesystem_found = 1;
66984+#endif
66985+
66986 switch (sym[i].st_shndx) {
66987 case SHN_COMMON:
66988 /* We compiled with -fno-common. These are not
66989@@ -1891,7 +1917,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66990 ksym = resolve_symbol_wait(mod, info, name);
66991 /* Ok if resolved. */
66992 if (ksym && !IS_ERR(ksym)) {
66993+ pax_open_kernel();
66994 sym[i].st_value = ksym->value;
66995+ pax_close_kernel();
66996 break;
66997 }
66998
66999@@ -1910,11 +1938,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67000 secbase = (unsigned long)mod_percpu(mod);
67001 else
67002 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
67003+ pax_open_kernel();
67004 sym[i].st_value += secbase;
67005+ pax_close_kernel();
67006 break;
67007 }
67008 }
67009
67010+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67011+ if (is_fs_load && !register_filesystem_found) {
67012+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
67013+ ret = -EPERM;
67014+ }
67015+#endif
67016+
67017 return ret;
67018 }
67019
67020@@ -2018,22 +2055,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
67021 || s->sh_entsize != ~0UL
67022 || strstarts(sname, ".init"))
67023 continue;
67024- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
67025+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67026+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
67027+ else
67028+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
67029 pr_debug("\t%s\n", sname);
67030 }
67031- switch (m) {
67032- case 0: /* executable */
67033- mod->core_size = debug_align(mod->core_size);
67034- mod->core_text_size = mod->core_size;
67035- break;
67036- case 1: /* RO: text and ro-data */
67037- mod->core_size = debug_align(mod->core_size);
67038- mod->core_ro_size = mod->core_size;
67039- break;
67040- case 3: /* whole core */
67041- mod->core_size = debug_align(mod->core_size);
67042- break;
67043- }
67044 }
67045
67046 pr_debug("Init section allocation order:\n");
67047@@ -2047,23 +2074,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
67048 || s->sh_entsize != ~0UL
67049 || !strstarts(sname, ".init"))
67050 continue;
67051- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
67052- | INIT_OFFSET_MASK);
67053+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67054+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
67055+ else
67056+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
67057+ s->sh_entsize |= INIT_OFFSET_MASK;
67058 pr_debug("\t%s\n", sname);
67059 }
67060- switch (m) {
67061- case 0: /* executable */
67062- mod->init_size = debug_align(mod->init_size);
67063- mod->init_text_size = mod->init_size;
67064- break;
67065- case 1: /* RO: text and ro-data */
67066- mod->init_size = debug_align(mod->init_size);
67067- mod->init_ro_size = mod->init_size;
67068- break;
67069- case 3: /* whole init */
67070- mod->init_size = debug_align(mod->init_size);
67071- break;
67072- }
67073 }
67074 }
67075
67076@@ -2235,7 +2252,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67077
67078 /* Put symbol section at end of init part of module. */
67079 symsect->sh_flags |= SHF_ALLOC;
67080- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
67081+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
67082 info->index.sym) | INIT_OFFSET_MASK;
67083 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
67084
67085@@ -2250,13 +2267,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67086 }
67087
67088 /* Append room for core symbols at end of core part. */
67089- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
67090- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
67091- mod->core_size += strtab_size;
67092+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
67093+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
67094+ mod->core_size_rx += strtab_size;
67095
67096 /* Put string table section at end of init part of module. */
67097 strsect->sh_flags |= SHF_ALLOC;
67098- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
67099+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
67100 info->index.str) | INIT_OFFSET_MASK;
67101 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
67102 }
67103@@ -2274,12 +2291,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67104 /* Make sure we get permanent strtab: don't use info->strtab. */
67105 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
67106
67107+ pax_open_kernel();
67108+
67109 /* Set types up while we still have access to sections. */
67110 for (i = 0; i < mod->num_symtab; i++)
67111 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
67112
67113- mod->core_symtab = dst = mod->module_core + info->symoffs;
67114- mod->core_strtab = s = mod->module_core + info->stroffs;
67115+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
67116+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
67117 src = mod->symtab;
67118 *dst = *src;
67119 *s++ = 0;
67120@@ -2292,6 +2311,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67121 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
67122 }
67123 mod->core_num_syms = ndst;
67124+
67125+ pax_close_kernel();
67126 }
67127 #else
67128 static inline void layout_symtab(struct module *mod, struct load_info *info)
67129@@ -2325,17 +2346,33 @@ void * __weak module_alloc(unsigned long size)
67130 return size == 0 ? NULL : vmalloc_exec(size);
67131 }
67132
67133-static void *module_alloc_update_bounds(unsigned long size)
67134+static void *module_alloc_update_bounds_rw(unsigned long size)
67135 {
67136 void *ret = module_alloc(size);
67137
67138 if (ret) {
67139 mutex_lock(&module_mutex);
67140 /* Update module bounds. */
67141- if ((unsigned long)ret < module_addr_min)
67142- module_addr_min = (unsigned long)ret;
67143- if ((unsigned long)ret + size > module_addr_max)
67144- module_addr_max = (unsigned long)ret + size;
67145+ if ((unsigned long)ret < module_addr_min_rw)
67146+ module_addr_min_rw = (unsigned long)ret;
67147+ if ((unsigned long)ret + size > module_addr_max_rw)
67148+ module_addr_max_rw = (unsigned long)ret + size;
67149+ mutex_unlock(&module_mutex);
67150+ }
67151+ return ret;
67152+}
67153+
67154+static void *module_alloc_update_bounds_rx(unsigned long size)
67155+{
67156+ void *ret = module_alloc_exec(size);
67157+
67158+ if (ret) {
67159+ mutex_lock(&module_mutex);
67160+ /* Update module bounds. */
67161+ if ((unsigned long)ret < module_addr_min_rx)
67162+ module_addr_min_rx = (unsigned long)ret;
67163+ if ((unsigned long)ret + size > module_addr_max_rx)
67164+ module_addr_max_rx = (unsigned long)ret + size;
67165 mutex_unlock(&module_mutex);
67166 }
67167 return ret;
67168@@ -2512,8 +2549,14 @@ static struct module *setup_load_info(struct load_info *info)
67169 static int check_modinfo(struct module *mod, struct load_info *info)
67170 {
67171 const char *modmagic = get_modinfo(info, "vermagic");
67172+ const char *license = get_modinfo(info, "license");
67173 int err;
67174
67175+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
67176+ if (!license || !license_is_gpl_compatible(license))
67177+ return -ENOEXEC;
67178+#endif
67179+
67180 /* This is allowed: modprobe --force will invalidate it. */
67181 if (!modmagic) {
67182 err = try_to_force_load(mod, "bad vermagic");
67183@@ -2536,7 +2579,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
67184 }
67185
67186 /* Set up license info based on the info section */
67187- set_license(mod, get_modinfo(info, "license"));
67188+ set_license(mod, license);
67189
67190 return 0;
67191 }
67192@@ -2630,7 +2673,7 @@ static int move_module(struct module *mod, struct load_info *info)
67193 void *ptr;
67194
67195 /* Do the allocs. */
67196- ptr = module_alloc_update_bounds(mod->core_size);
67197+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
67198 /*
67199 * The pointer to this block is stored in the module structure
67200 * which is inside the block. Just mark it as not being a
67201@@ -2640,23 +2683,50 @@ static int move_module(struct module *mod, struct load_info *info)
67202 if (!ptr)
67203 return -ENOMEM;
67204
67205- memset(ptr, 0, mod->core_size);
67206- mod->module_core = ptr;
67207+ memset(ptr, 0, mod->core_size_rw);
67208+ mod->module_core_rw = ptr;
67209
67210- ptr = module_alloc_update_bounds(mod->init_size);
67211+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
67212 /*
67213 * The pointer to this block is stored in the module structure
67214 * which is inside the block. This block doesn't need to be
67215 * scanned as it contains data and code that will be freed
67216 * after the module is initialized.
67217 */
67218- kmemleak_ignore(ptr);
67219- if (!ptr && mod->init_size) {
67220- module_free(mod, mod->module_core);
67221+ kmemleak_not_leak(ptr);
67222+ if (!ptr && mod->init_size_rw) {
67223+ module_free(mod, mod->module_core_rw);
67224 return -ENOMEM;
67225 }
67226- memset(ptr, 0, mod->init_size);
67227- mod->module_init = ptr;
67228+ memset(ptr, 0, mod->init_size_rw);
67229+ mod->module_init_rw = ptr;
67230+
67231+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
67232+ kmemleak_not_leak(ptr);
67233+ if (!ptr) {
67234+ module_free(mod, mod->module_init_rw);
67235+ module_free(mod, mod->module_core_rw);
67236+ return -ENOMEM;
67237+ }
67238+
67239+ pax_open_kernel();
67240+ memset(ptr, 0, mod->core_size_rx);
67241+ pax_close_kernel();
67242+ mod->module_core_rx = ptr;
67243+
67244+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
67245+ kmemleak_not_leak(ptr);
67246+ if (!ptr && mod->init_size_rx) {
67247+ module_free_exec(mod, mod->module_core_rx);
67248+ module_free(mod, mod->module_init_rw);
67249+ module_free(mod, mod->module_core_rw);
67250+ return -ENOMEM;
67251+ }
67252+
67253+ pax_open_kernel();
67254+ memset(ptr, 0, mod->init_size_rx);
67255+ pax_close_kernel();
67256+ mod->module_init_rx = ptr;
67257
67258 /* Transfer each section which specifies SHF_ALLOC */
67259 pr_debug("final section addresses:\n");
67260@@ -2667,16 +2737,45 @@ static int move_module(struct module *mod, struct load_info *info)
67261 if (!(shdr->sh_flags & SHF_ALLOC))
67262 continue;
67263
67264- if (shdr->sh_entsize & INIT_OFFSET_MASK)
67265- dest = mod->module_init
67266- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67267- else
67268- dest = mod->module_core + shdr->sh_entsize;
67269+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
67270+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67271+ dest = mod->module_init_rw
67272+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67273+ else
67274+ dest = mod->module_init_rx
67275+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67276+ } else {
67277+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67278+ dest = mod->module_core_rw + shdr->sh_entsize;
67279+ else
67280+ dest = mod->module_core_rx + shdr->sh_entsize;
67281+ }
67282+
67283+ if (shdr->sh_type != SHT_NOBITS) {
67284+
67285+#ifdef CONFIG_PAX_KERNEXEC
67286+#ifdef CONFIG_X86_64
67287+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
67288+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
67289+#endif
67290+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
67291+ pax_open_kernel();
67292+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67293+ pax_close_kernel();
67294+ } else
67295+#endif
67296
67297- if (shdr->sh_type != SHT_NOBITS)
67298 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67299+ }
67300 /* Update sh_addr to point to copy in image. */
67301- shdr->sh_addr = (unsigned long)dest;
67302+
67303+#ifdef CONFIG_PAX_KERNEXEC
67304+ if (shdr->sh_flags & SHF_EXECINSTR)
67305+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
67306+ else
67307+#endif
67308+
67309+ shdr->sh_addr = (unsigned long)dest;
67310 pr_debug("\t0x%lx %s\n",
67311 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
67312 }
67313@@ -2727,12 +2826,12 @@ static void flush_module_icache(const struct module *mod)
67314 * Do it before processing of module parameters, so the module
67315 * can provide parameter accessor functions of its own.
67316 */
67317- if (mod->module_init)
67318- flush_icache_range((unsigned long)mod->module_init,
67319- (unsigned long)mod->module_init
67320- + mod->init_size);
67321- flush_icache_range((unsigned long)mod->module_core,
67322- (unsigned long)mod->module_core + mod->core_size);
67323+ if (mod->module_init_rx)
67324+ flush_icache_range((unsigned long)mod->module_init_rx,
67325+ (unsigned long)mod->module_init_rx
67326+ + mod->init_size_rx);
67327+ flush_icache_range((unsigned long)mod->module_core_rx,
67328+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
67329
67330 set_fs(old_fs);
67331 }
67332@@ -2802,8 +2901,10 @@ out:
67333 static void module_deallocate(struct module *mod, struct load_info *info)
67334 {
67335 percpu_modfree(mod);
67336- module_free(mod, mod->module_init);
67337- module_free(mod, mod->module_core);
67338+ module_free_exec(mod, mod->module_init_rx);
67339+ module_free_exec(mod, mod->module_core_rx);
67340+ module_free(mod, mod->module_init_rw);
67341+ module_free(mod, mod->module_core_rw);
67342 }
67343
67344 int __weak module_finalize(const Elf_Ehdr *hdr,
67345@@ -2867,9 +2968,38 @@ static struct module *load_module(void __user *umod,
67346 if (err)
67347 goto free_unload;
67348
67349+ /* Now copy in args */
67350+ mod->args = strndup_user(uargs, ~0UL >> 1);
67351+ if (IS_ERR(mod->args)) {
67352+ err = PTR_ERR(mod->args);
67353+ goto free_unload;
67354+ }
67355+
67356 /* Set up MODINFO_ATTR fields */
67357 setup_modinfo(mod, &info);
67358
67359+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67360+ {
67361+ char *p, *p2;
67362+
67363+ if (strstr(mod->args, "grsec_modharden_netdev")) {
67364+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
67365+ err = -EPERM;
67366+ goto free_modinfo;
67367+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
67368+ p += strlen("grsec_modharden_normal");
67369+ p2 = strstr(p, "_");
67370+ if (p2) {
67371+ *p2 = '\0';
67372+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
67373+ *p2 = '_';
67374+ }
67375+ err = -EPERM;
67376+ goto free_modinfo;
67377+ }
67378+ }
67379+#endif
67380+
67381 /* Fix up syms, so that st_value is a pointer to location. */
67382 err = simplify_symbols(mod, &info);
67383 if (err < 0)
67384@@ -2885,13 +3015,6 @@ static struct module *load_module(void __user *umod,
67385
67386 flush_module_icache(mod);
67387
67388- /* Now copy in args */
67389- mod->args = strndup_user(uargs, ~0UL >> 1);
67390- if (IS_ERR(mod->args)) {
67391- err = PTR_ERR(mod->args);
67392- goto free_arch_cleanup;
67393- }
67394-
67395 /* Mark state as coming so strong_try_module_get() ignores us. */
67396 mod->state = MODULE_STATE_COMING;
67397
67398@@ -2948,11 +3071,10 @@ static struct module *load_module(void __user *umod,
67399 unlock:
67400 mutex_unlock(&module_mutex);
67401 synchronize_sched();
67402- kfree(mod->args);
67403- free_arch_cleanup:
67404 module_arch_cleanup(mod);
67405 free_modinfo:
67406 free_modinfo(mod);
67407+ kfree(mod->args);
67408 free_unload:
67409 module_unload_free(mod);
67410 free_module:
67411@@ -2993,16 +3115,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67412 MODULE_STATE_COMING, mod);
67413
67414 /* Set RO and NX regions for core */
67415- set_section_ro_nx(mod->module_core,
67416- mod->core_text_size,
67417- mod->core_ro_size,
67418- mod->core_size);
67419+ set_section_ro_nx(mod->module_core_rx,
67420+ mod->core_size_rx,
67421+ mod->core_size_rx,
67422+ mod->core_size_rx);
67423
67424 /* Set RO and NX regions for init */
67425- set_section_ro_nx(mod->module_init,
67426- mod->init_text_size,
67427- mod->init_ro_size,
67428- mod->init_size);
67429+ set_section_ro_nx(mod->module_init_rx,
67430+ mod->init_size_rx,
67431+ mod->init_size_rx,
67432+ mod->init_size_rx);
67433
67434 do_mod_ctors(mod);
67435 /* Start the module */
67436@@ -3048,11 +3170,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67437 mod->strtab = mod->core_strtab;
67438 #endif
67439 unset_module_init_ro_nx(mod);
67440- module_free(mod, mod->module_init);
67441- mod->module_init = NULL;
67442- mod->init_size = 0;
67443- mod->init_ro_size = 0;
67444- mod->init_text_size = 0;
67445+ module_free(mod, mod->module_init_rw);
67446+ module_free_exec(mod, mod->module_init_rx);
67447+ mod->module_init_rw = NULL;
67448+ mod->module_init_rx = NULL;
67449+ mod->init_size_rw = 0;
67450+ mod->init_size_rx = 0;
67451 mutex_unlock(&module_mutex);
67452
67453 return 0;
67454@@ -3083,10 +3206,16 @@ static const char *get_ksymbol(struct module *mod,
67455 unsigned long nextval;
67456
67457 /* At worse, next value is at end of module */
67458- if (within_module_init(addr, mod))
67459- nextval = (unsigned long)mod->module_init+mod->init_text_size;
67460+ if (within_module_init_rx(addr, mod))
67461+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
67462+ else if (within_module_init_rw(addr, mod))
67463+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
67464+ else if (within_module_core_rx(addr, mod))
67465+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
67466+ else if (within_module_core_rw(addr, mod))
67467+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
67468 else
67469- nextval = (unsigned long)mod->module_core+mod->core_text_size;
67470+ return NULL;
67471
67472 /* Scan for closest preceding symbol, and next symbol. (ELF
67473 starts real symbols at 1). */
67474@@ -3321,7 +3450,7 @@ static int m_show(struct seq_file *m, void *p)
67475 char buf[8];
67476
67477 seq_printf(m, "%s %u",
67478- mod->name, mod->init_size + mod->core_size);
67479+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
67480 print_unload_info(m, mod);
67481
67482 /* Informative for users. */
67483@@ -3330,7 +3459,7 @@ static int m_show(struct seq_file *m, void *p)
67484 mod->state == MODULE_STATE_COMING ? "Loading":
67485 "Live");
67486 /* Used by oprofile and other similar tools. */
67487- seq_printf(m, " 0x%pK", mod->module_core);
67488+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
67489
67490 /* Taints info */
67491 if (mod->taints)
67492@@ -3366,7 +3495,17 @@ static const struct file_operations proc_modules_operations = {
67493
67494 static int __init proc_modules_init(void)
67495 {
67496+#ifndef CONFIG_GRKERNSEC_HIDESYM
67497+#ifdef CONFIG_GRKERNSEC_PROC_USER
67498+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67499+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67500+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
67501+#else
67502 proc_create("modules", 0, NULL, &proc_modules_operations);
67503+#endif
67504+#else
67505+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67506+#endif
67507 return 0;
67508 }
67509 module_init(proc_modules_init);
67510@@ -3425,12 +3564,12 @@ struct module *__module_address(unsigned long addr)
67511 {
67512 struct module *mod;
67513
67514- if (addr < module_addr_min || addr > module_addr_max)
67515+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
67516+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
67517 return NULL;
67518
67519 list_for_each_entry_rcu(mod, &modules, list)
67520- if (within_module_core(addr, mod)
67521- || within_module_init(addr, mod))
67522+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
67523 return mod;
67524 return NULL;
67525 }
67526@@ -3464,11 +3603,20 @@ bool is_module_text_address(unsigned long addr)
67527 */
67528 struct module *__module_text_address(unsigned long addr)
67529 {
67530- struct module *mod = __module_address(addr);
67531+ struct module *mod;
67532+
67533+#ifdef CONFIG_X86_32
67534+ addr = ktla_ktva(addr);
67535+#endif
67536+
67537+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
67538+ return NULL;
67539+
67540+ mod = __module_address(addr);
67541+
67542 if (mod) {
67543 /* Make sure it's within the text section. */
67544- if (!within(addr, mod->module_init, mod->init_text_size)
67545- && !within(addr, mod->module_core, mod->core_text_size))
67546+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
67547 mod = NULL;
67548 }
67549 return mod;
67550diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
67551index 7e3443f..b2a1e6b 100644
67552--- a/kernel/mutex-debug.c
67553+++ b/kernel/mutex-debug.c
67554@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
67555 }
67556
67557 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67558- struct thread_info *ti)
67559+ struct task_struct *task)
67560 {
67561 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
67562
67563 /* Mark the current thread as blocked on the lock: */
67564- ti->task->blocked_on = waiter;
67565+ task->blocked_on = waiter;
67566 }
67567
67568 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67569- struct thread_info *ti)
67570+ struct task_struct *task)
67571 {
67572 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
67573- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
67574- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
67575- ti->task->blocked_on = NULL;
67576+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
67577+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
67578+ task->blocked_on = NULL;
67579
67580 list_del_init(&waiter->list);
67581 waiter->task = NULL;
67582diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
67583index 0799fd3..d06ae3b 100644
67584--- a/kernel/mutex-debug.h
67585+++ b/kernel/mutex-debug.h
67586@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
67587 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
67588 extern void debug_mutex_add_waiter(struct mutex *lock,
67589 struct mutex_waiter *waiter,
67590- struct thread_info *ti);
67591+ struct task_struct *task);
67592 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67593- struct thread_info *ti);
67594+ struct task_struct *task);
67595 extern void debug_mutex_unlock(struct mutex *lock);
67596 extern void debug_mutex_init(struct mutex *lock, const char *name,
67597 struct lock_class_key *key);
67598diff --git a/kernel/mutex.c b/kernel/mutex.c
67599index 89096dd..f91ebc5 100644
67600--- a/kernel/mutex.c
67601+++ b/kernel/mutex.c
67602@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67603 spin_lock_mutex(&lock->wait_lock, flags);
67604
67605 debug_mutex_lock_common(lock, &waiter);
67606- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
67607+ debug_mutex_add_waiter(lock, &waiter, task);
67608
67609 /* add waiting tasks to the end of the waitqueue (FIFO): */
67610 list_add_tail(&waiter.list, &lock->wait_list);
67611@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67612 * TASK_UNINTERRUPTIBLE case.)
67613 */
67614 if (unlikely(signal_pending_state(state, task))) {
67615- mutex_remove_waiter(lock, &waiter,
67616- task_thread_info(task));
67617+ mutex_remove_waiter(lock, &waiter, task);
67618 mutex_release(&lock->dep_map, 1, ip);
67619 spin_unlock_mutex(&lock->wait_lock, flags);
67620
67621@@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67622 done:
67623 lock_acquired(&lock->dep_map, ip);
67624 /* got the lock - rejoice! */
67625- mutex_remove_waiter(lock, &waiter, current_thread_info());
67626+ mutex_remove_waiter(lock, &waiter, task);
67627 mutex_set_owner(lock);
67628
67629 /* set it to 0 if there are no waiters left: */
67630diff --git a/kernel/padata.c b/kernel/padata.c
67631index b452599..5d68f4e 100644
67632--- a/kernel/padata.c
67633+++ b/kernel/padata.c
67634@@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
67635 padata->pd = pd;
67636 padata->cb_cpu = cb_cpu;
67637
67638- if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
67639- atomic_set(&pd->seq_nr, -1);
67640+ if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
67641+ atomic_set_unchecked(&pd->seq_nr, -1);
67642
67643- padata->seq_nr = atomic_inc_return(&pd->seq_nr);
67644+ padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
67645
67646 target_cpu = padata_cpu_hash(padata);
67647 queue = per_cpu_ptr(pd->pqueue, target_cpu);
67648@@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
67649 padata_init_pqueues(pd);
67650 padata_init_squeues(pd);
67651 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
67652- atomic_set(&pd->seq_nr, -1);
67653+ atomic_set_unchecked(&pd->seq_nr, -1);
67654 atomic_set(&pd->reorder_objects, 0);
67655 atomic_set(&pd->refcnt, 0);
67656 pd->pinst = pinst;
67657diff --git a/kernel/panic.c b/kernel/panic.c
67658index 80aed44..f291d37 100644
67659--- a/kernel/panic.c
67660+++ b/kernel/panic.c
67661@@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
67662 const char *board;
67663
67664 printk(KERN_WARNING "------------[ cut here ]------------\n");
67665- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
67666+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
67667 board = dmi_get_system_info(DMI_PRODUCT_NAME);
67668 if (board)
67669 printk(KERN_WARNING "Hardware name: %s\n", board);
67670@@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
67671 */
67672 void __stack_chk_fail(void)
67673 {
67674- panic("stack-protector: Kernel stack is corrupted in: %p\n",
67675+ dump_stack();
67676+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
67677 __builtin_return_address(0));
67678 }
67679 EXPORT_SYMBOL(__stack_chk_fail);
67680diff --git a/kernel/pid.c b/kernel/pid.c
67681index 9f08dfa..6765c40 100644
67682--- a/kernel/pid.c
67683+++ b/kernel/pid.c
67684@@ -33,6 +33,7 @@
67685 #include <linux/rculist.h>
67686 #include <linux/bootmem.h>
67687 #include <linux/hash.h>
67688+#include <linux/security.h>
67689 #include <linux/pid_namespace.h>
67690 #include <linux/init_task.h>
67691 #include <linux/syscalls.h>
67692@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
67693
67694 int pid_max = PID_MAX_DEFAULT;
67695
67696-#define RESERVED_PIDS 300
67697+#define RESERVED_PIDS 500
67698
67699 int pid_max_min = RESERVED_PIDS + 1;
67700 int pid_max_max = PID_MAX_LIMIT;
67701@@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
67702 */
67703 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
67704 {
67705+ struct task_struct *task;
67706+
67707 rcu_lockdep_assert(rcu_read_lock_held(),
67708 "find_task_by_pid_ns() needs rcu_read_lock()"
67709 " protection");
67710- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67711+
67712+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67713+
67714+ if (gr_pid_is_chrooted(task))
67715+ return NULL;
67716+
67717+ return task;
67718 }
67719
67720 struct task_struct *find_task_by_vpid(pid_t vnr)
67721@@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
67722 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
67723 }
67724
67725+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
67726+{
67727+ rcu_lockdep_assert(rcu_read_lock_held(),
67728+ "find_task_by_pid_ns() needs rcu_read_lock()"
67729+ " protection");
67730+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
67731+}
67732+
67733 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
67734 {
67735 struct pid *pid;
67736diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
67737index 125cb67..a4d1c30 100644
67738--- a/kernel/posix-cpu-timers.c
67739+++ b/kernel/posix-cpu-timers.c
67740@@ -6,6 +6,7 @@
67741 #include <linux/posix-timers.h>
67742 #include <linux/errno.h>
67743 #include <linux/math64.h>
67744+#include <linux/security.h>
67745 #include <asm/uaccess.h>
67746 #include <linux/kernel_stat.h>
67747 #include <trace/events/timer.h>
67748@@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
67749
67750 static __init int init_posix_cpu_timers(void)
67751 {
67752- struct k_clock process = {
67753+ static struct k_clock process = {
67754 .clock_getres = process_cpu_clock_getres,
67755 .clock_get = process_cpu_clock_get,
67756 .timer_create = process_cpu_timer_create,
67757 .nsleep = process_cpu_nsleep,
67758 .nsleep_restart = process_cpu_nsleep_restart,
67759 };
67760- struct k_clock thread = {
67761+ static struct k_clock thread = {
67762 .clock_getres = thread_cpu_clock_getres,
67763 .clock_get = thread_cpu_clock_get,
67764 .timer_create = thread_cpu_timer_create,
67765diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
67766index 69185ae..cc2847a 100644
67767--- a/kernel/posix-timers.c
67768+++ b/kernel/posix-timers.c
67769@@ -43,6 +43,7 @@
67770 #include <linux/idr.h>
67771 #include <linux/posix-clock.h>
67772 #include <linux/posix-timers.h>
67773+#include <linux/grsecurity.h>
67774 #include <linux/syscalls.h>
67775 #include <linux/wait.h>
67776 #include <linux/workqueue.h>
67777@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
67778 * which we beg off on and pass to do_sys_settimeofday().
67779 */
67780
67781-static struct k_clock posix_clocks[MAX_CLOCKS];
67782+static struct k_clock *posix_clocks[MAX_CLOCKS];
67783
67784 /*
67785 * These ones are defined below.
67786@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
67787 */
67788 static __init int init_posix_timers(void)
67789 {
67790- struct k_clock clock_realtime = {
67791+ static struct k_clock clock_realtime = {
67792 .clock_getres = hrtimer_get_res,
67793 .clock_get = posix_clock_realtime_get,
67794 .clock_set = posix_clock_realtime_set,
67795@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
67796 .timer_get = common_timer_get,
67797 .timer_del = common_timer_del,
67798 };
67799- struct k_clock clock_monotonic = {
67800+ static struct k_clock clock_monotonic = {
67801 .clock_getres = hrtimer_get_res,
67802 .clock_get = posix_ktime_get_ts,
67803 .nsleep = common_nsleep,
67804@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
67805 .timer_get = common_timer_get,
67806 .timer_del = common_timer_del,
67807 };
67808- struct k_clock clock_monotonic_raw = {
67809+ static struct k_clock clock_monotonic_raw = {
67810 .clock_getres = hrtimer_get_res,
67811 .clock_get = posix_get_monotonic_raw,
67812 };
67813- struct k_clock clock_realtime_coarse = {
67814+ static struct k_clock clock_realtime_coarse = {
67815 .clock_getres = posix_get_coarse_res,
67816 .clock_get = posix_get_realtime_coarse,
67817 };
67818- struct k_clock clock_monotonic_coarse = {
67819+ static struct k_clock clock_monotonic_coarse = {
67820 .clock_getres = posix_get_coarse_res,
67821 .clock_get = posix_get_monotonic_coarse,
67822 };
67823- struct k_clock clock_boottime = {
67824+ static struct k_clock clock_boottime = {
67825 .clock_getres = hrtimer_get_res,
67826 .clock_get = posix_get_boottime,
67827 .nsleep = common_nsleep,
67828@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
67829 return;
67830 }
67831
67832- posix_clocks[clock_id] = *new_clock;
67833+ posix_clocks[clock_id] = new_clock;
67834 }
67835 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
67836
67837@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
67838 return (id & CLOCKFD_MASK) == CLOCKFD ?
67839 &clock_posix_dynamic : &clock_posix_cpu;
67840
67841- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67842+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67843 return NULL;
67844- return &posix_clocks[id];
67845+ return posix_clocks[id];
67846 }
67847
67848 static int common_timer_create(struct k_itimer *new_timer)
67849@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
67850 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67851 return -EFAULT;
67852
67853+ /* only the CLOCK_REALTIME clock can be set, all other clocks
67854+ have their clock_set fptr set to a nosettime dummy function
67855+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
67856+ call common_clock_set, which calls do_sys_settimeofday, which
67857+ we hook
67858+ */
67859+
67860 return kc->clock_set(which_clock, &new_tp);
67861 }
67862
67863diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67864index d523593..68197a4 100644
67865--- a/kernel/power/poweroff.c
67866+++ b/kernel/power/poweroff.c
67867@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
67868 .enable_mask = SYSRQ_ENABLE_BOOT,
67869 };
67870
67871-static int pm_sysrq_init(void)
67872+static int __init pm_sysrq_init(void)
67873 {
67874 register_sysrq_key('o', &sysrq_poweroff_op);
67875 return 0;
67876diff --git a/kernel/power/process.c b/kernel/power/process.c
67877index 7e42645..3d43df1 100644
67878--- a/kernel/power/process.c
67879+++ b/kernel/power/process.c
67880@@ -32,6 +32,7 @@ static int try_to_freeze_tasks(bool user_only)
67881 u64 elapsed_csecs64;
67882 unsigned int elapsed_csecs;
67883 bool wakeup = false;
67884+ bool timedout = false;
67885
67886 do_gettimeofday(&start);
67887
67888@@ -42,6 +43,8 @@ static int try_to_freeze_tasks(bool user_only)
67889
67890 while (true) {
67891 todo = 0;
67892+ if (time_after(jiffies, end_time))
67893+ timedout = true;
67894 read_lock(&tasklist_lock);
67895 do_each_thread(g, p) {
67896 if (p == current || !freeze_task(p))
67897@@ -59,9 +62,13 @@ static int try_to_freeze_tasks(bool user_only)
67898 * try_to_stop() after schedule() in ptrace/signal
67899 * stop sees TIF_FREEZE.
67900 */
67901- if (!task_is_stopped_or_traced(p) &&
67902- !freezer_should_skip(p))
67903+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67904 todo++;
67905+ if (timedout) {
67906+ printk(KERN_ERR "Task refusing to freeze:\n");
67907+ sched_show_task(p);
67908+ }
67909+ }
67910 } while_each_thread(g, p);
67911 read_unlock(&tasklist_lock);
67912
67913@@ -70,7 +77,7 @@ static int try_to_freeze_tasks(bool user_only)
67914 todo += wq_busy;
67915 }
67916
67917- if (!todo || time_after(jiffies, end_time))
67918+ if (!todo || timedout)
67919 break;
67920
67921 if (pm_wakeup_pending()) {
67922diff --git a/kernel/printk.c b/kernel/printk.c
67923index 32690a0..cd7c798 100644
67924--- a/kernel/printk.c
67925+++ b/kernel/printk.c
67926@@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
67927 if (from_file && type != SYSLOG_ACTION_OPEN)
67928 return 0;
67929
67930+#ifdef CONFIG_GRKERNSEC_DMESG
67931+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67932+ return -EPERM;
67933+#endif
67934+
67935 if (syslog_action_restricted(type)) {
67936 if (capable(CAP_SYSLOG))
67937 return 0;
67938diff --git a/kernel/profile.c b/kernel/profile.c
67939index 76b8e77..a2930e8 100644
67940--- a/kernel/profile.c
67941+++ b/kernel/profile.c
67942@@ -39,7 +39,7 @@ struct profile_hit {
67943 /* Oprofile timer tick hook */
67944 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67945
67946-static atomic_t *prof_buffer;
67947+static atomic_unchecked_t *prof_buffer;
67948 static unsigned long prof_len, prof_shift;
67949
67950 int prof_on __read_mostly;
67951@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
67952 hits[i].pc = 0;
67953 continue;
67954 }
67955- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67956+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67957 hits[i].hits = hits[i].pc = 0;
67958 }
67959 }
67960@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67961 * Add the current hit(s) and flush the write-queue out
67962 * to the global buffer:
67963 */
67964- atomic_add(nr_hits, &prof_buffer[pc]);
67965+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67966 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67967- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67968+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67969 hits[i].pc = hits[i].hits = 0;
67970 }
67971 out:
67972@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67973 {
67974 unsigned long pc;
67975 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67976- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67977+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67978 }
67979 #endif /* !CONFIG_SMP */
67980
67981@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67982 return -EFAULT;
67983 buf++; p++; count--; read++;
67984 }
67985- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67986+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67987 if (copy_to_user(buf, (void *)pnt, count))
67988 return -EFAULT;
67989 read += count;
67990@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
67991 }
67992 #endif
67993 profile_discard_flip_buffers();
67994- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67995+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67996 return count;
67997 }
67998
67999diff --git a/kernel/ptrace.c b/kernel/ptrace.c
68000index 00ab2ca..d237f61 100644
68001--- a/kernel/ptrace.c
68002+++ b/kernel/ptrace.c
68003@@ -285,7 +285,7 @@ static int ptrace_attach(struct task_struct *task, long request,
68004 task->ptrace = PT_PTRACED;
68005 if (seize)
68006 task->ptrace |= PT_SEIZED;
68007- if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
68008+ if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
68009 task->ptrace |= PT_PTRACE_CAP;
68010
68011 __ptrace_link(task, current);
68012@@ -491,7 +491,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
68013 break;
68014 return -EIO;
68015 }
68016- if (copy_to_user(dst, buf, retval))
68017+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
68018 return -EFAULT;
68019 copied += retval;
68020 src += retval;
68021@@ -688,7 +688,7 @@ int ptrace_request(struct task_struct *child, long request,
68022 bool seized = child->ptrace & PT_SEIZED;
68023 int ret = -EIO;
68024 siginfo_t siginfo, *si;
68025- void __user *datavp = (void __user *) data;
68026+ void __user *datavp = (__force void __user *) data;
68027 unsigned long __user *datalp = datavp;
68028 unsigned long flags;
68029
68030@@ -890,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
68031 goto out;
68032 }
68033
68034+ if (gr_handle_ptrace(child, request)) {
68035+ ret = -EPERM;
68036+ goto out_put_task_struct;
68037+ }
68038+
68039 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68040 ret = ptrace_attach(child, request, data);
68041 /*
68042 * Some architectures need to do book-keeping after
68043 * a ptrace attach.
68044 */
68045- if (!ret)
68046+ if (!ret) {
68047 arch_ptrace_attach(child);
68048+ gr_audit_ptrace(child);
68049+ }
68050 goto out_put_task_struct;
68051 }
68052
68053@@ -923,7 +930,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
68054 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
68055 if (copied != sizeof(tmp))
68056 return -EIO;
68057- return put_user(tmp, (unsigned long __user *)data);
68058+ return put_user(tmp, (__force unsigned long __user *)data);
68059 }
68060
68061 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
68062@@ -1033,14 +1040,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
68063 goto out;
68064 }
68065
68066+ if (gr_handle_ptrace(child, request)) {
68067+ ret = -EPERM;
68068+ goto out_put_task_struct;
68069+ }
68070+
68071 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68072 ret = ptrace_attach(child, request, data);
68073 /*
68074 * Some architectures need to do book-keeping after
68075 * a ptrace attach.
68076 */
68077- if (!ret)
68078+ if (!ret) {
68079 arch_ptrace_attach(child);
68080+ gr_audit_ptrace(child);
68081+ }
68082 goto out_put_task_struct;
68083 }
68084
68085diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
68086index 977296d..c4744dc 100644
68087--- a/kernel/rcutiny.c
68088+++ b/kernel/rcutiny.c
68089@@ -46,7 +46,7 @@
68090 struct rcu_ctrlblk;
68091 static void invoke_rcu_callbacks(void);
68092 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
68093-static void rcu_process_callbacks(struct softirq_action *unused);
68094+static void rcu_process_callbacks(void);
68095 static void __call_rcu(struct rcu_head *head,
68096 void (*func)(struct rcu_head *rcu),
68097 struct rcu_ctrlblk *rcp);
68098@@ -297,7 +297,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
68099 rcu_is_callbacks_kthread()));
68100 }
68101
68102-static void rcu_process_callbacks(struct softirq_action *unused)
68103+static void rcu_process_callbacks(void)
68104 {
68105 __rcu_process_callbacks(&rcu_sched_ctrlblk);
68106 __rcu_process_callbacks(&rcu_bh_ctrlblk);
68107diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
68108index 9cb1ae4..aac7d3e 100644
68109--- a/kernel/rcutiny_plugin.h
68110+++ b/kernel/rcutiny_plugin.h
68111@@ -920,7 +920,7 @@ static int rcu_kthread(void *arg)
68112 have_rcu_kthread_work = morework;
68113 local_irq_restore(flags);
68114 if (work)
68115- rcu_process_callbacks(NULL);
68116+ rcu_process_callbacks();
68117 schedule_timeout_interruptible(1); /* Leave CPU for others. */
68118 }
68119
68120diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
68121index a58ac28..196a3d8 100644
68122--- a/kernel/rcutorture.c
68123+++ b/kernel/rcutorture.c
68124@@ -148,12 +148,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
68125 { 0 };
68126 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
68127 { 0 };
68128-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68129-static atomic_t n_rcu_torture_alloc;
68130-static atomic_t n_rcu_torture_alloc_fail;
68131-static atomic_t n_rcu_torture_free;
68132-static atomic_t n_rcu_torture_mberror;
68133-static atomic_t n_rcu_torture_error;
68134+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68135+static atomic_unchecked_t n_rcu_torture_alloc;
68136+static atomic_unchecked_t n_rcu_torture_alloc_fail;
68137+static atomic_unchecked_t n_rcu_torture_free;
68138+static atomic_unchecked_t n_rcu_torture_mberror;
68139+static atomic_unchecked_t n_rcu_torture_error;
68140 static long n_rcu_torture_boost_ktrerror;
68141 static long n_rcu_torture_boost_rterror;
68142 static long n_rcu_torture_boost_failure;
68143@@ -243,11 +243,11 @@ rcu_torture_alloc(void)
68144
68145 spin_lock_bh(&rcu_torture_lock);
68146 if (list_empty(&rcu_torture_freelist)) {
68147- atomic_inc(&n_rcu_torture_alloc_fail);
68148+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
68149 spin_unlock_bh(&rcu_torture_lock);
68150 return NULL;
68151 }
68152- atomic_inc(&n_rcu_torture_alloc);
68153+ atomic_inc_unchecked(&n_rcu_torture_alloc);
68154 p = rcu_torture_freelist.next;
68155 list_del_init(p);
68156 spin_unlock_bh(&rcu_torture_lock);
68157@@ -260,7 +260,7 @@ rcu_torture_alloc(void)
68158 static void
68159 rcu_torture_free(struct rcu_torture *p)
68160 {
68161- atomic_inc(&n_rcu_torture_free);
68162+ atomic_inc_unchecked(&n_rcu_torture_free);
68163 spin_lock_bh(&rcu_torture_lock);
68164 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
68165 spin_unlock_bh(&rcu_torture_lock);
68166@@ -380,7 +380,7 @@ rcu_torture_cb(struct rcu_head *p)
68167 i = rp->rtort_pipe_count;
68168 if (i > RCU_TORTURE_PIPE_LEN)
68169 i = RCU_TORTURE_PIPE_LEN;
68170- atomic_inc(&rcu_torture_wcount[i]);
68171+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
68172 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68173 rp->rtort_mbtest = 0;
68174 rcu_torture_free(rp);
68175@@ -427,7 +427,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
68176 i = rp->rtort_pipe_count;
68177 if (i > RCU_TORTURE_PIPE_LEN)
68178 i = RCU_TORTURE_PIPE_LEN;
68179- atomic_inc(&rcu_torture_wcount[i]);
68180+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
68181 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68182 rp->rtort_mbtest = 0;
68183 list_del(&rp->rtort_free);
68184@@ -916,7 +916,7 @@ rcu_torture_writer(void *arg)
68185 i = old_rp->rtort_pipe_count;
68186 if (i > RCU_TORTURE_PIPE_LEN)
68187 i = RCU_TORTURE_PIPE_LEN;
68188- atomic_inc(&rcu_torture_wcount[i]);
68189+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
68190 old_rp->rtort_pipe_count++;
68191 cur_ops->deferred_free(old_rp);
68192 }
68193@@ -997,7 +997,7 @@ static void rcu_torture_timer(unsigned long unused)
68194 return;
68195 }
68196 if (p->rtort_mbtest == 0)
68197- atomic_inc(&n_rcu_torture_mberror);
68198+ atomic_inc_unchecked(&n_rcu_torture_mberror);
68199 spin_lock(&rand_lock);
68200 cur_ops->read_delay(&rand);
68201 n_rcu_torture_timers++;
68202@@ -1061,7 +1061,7 @@ rcu_torture_reader(void *arg)
68203 continue;
68204 }
68205 if (p->rtort_mbtest == 0)
68206- atomic_inc(&n_rcu_torture_mberror);
68207+ atomic_inc_unchecked(&n_rcu_torture_mberror);
68208 cur_ops->read_delay(&rand);
68209 preempt_disable();
68210 pipe_count = p->rtort_pipe_count;
68211@@ -1123,10 +1123,10 @@ rcu_torture_printk(char *page)
68212 rcu_torture_current,
68213 rcu_torture_current_version,
68214 list_empty(&rcu_torture_freelist),
68215- atomic_read(&n_rcu_torture_alloc),
68216- atomic_read(&n_rcu_torture_alloc_fail),
68217- atomic_read(&n_rcu_torture_free),
68218- atomic_read(&n_rcu_torture_mberror),
68219+ atomic_read_unchecked(&n_rcu_torture_alloc),
68220+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
68221+ atomic_read_unchecked(&n_rcu_torture_free),
68222+ atomic_read_unchecked(&n_rcu_torture_mberror),
68223 n_rcu_torture_boost_ktrerror,
68224 n_rcu_torture_boost_rterror,
68225 n_rcu_torture_boost_failure,
68226@@ -1136,7 +1136,7 @@ rcu_torture_printk(char *page)
68227 n_online_attempts,
68228 n_offline_successes,
68229 n_offline_attempts);
68230- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
68231+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
68232 n_rcu_torture_boost_ktrerror != 0 ||
68233 n_rcu_torture_boost_rterror != 0 ||
68234 n_rcu_torture_boost_failure != 0)
68235@@ -1144,7 +1144,7 @@ rcu_torture_printk(char *page)
68236 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
68237 if (i > 1) {
68238 cnt += sprintf(&page[cnt], "!!! ");
68239- atomic_inc(&n_rcu_torture_error);
68240+ atomic_inc_unchecked(&n_rcu_torture_error);
68241 WARN_ON_ONCE(1);
68242 }
68243 cnt += sprintf(&page[cnt], "Reader Pipe: ");
68244@@ -1158,7 +1158,7 @@ rcu_torture_printk(char *page)
68245 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
68246 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68247 cnt += sprintf(&page[cnt], " %d",
68248- atomic_read(&rcu_torture_wcount[i]));
68249+ atomic_read_unchecked(&rcu_torture_wcount[i]));
68250 }
68251 cnt += sprintf(&page[cnt], "\n");
68252 if (cur_ops->stats)
68253@@ -1600,7 +1600,7 @@ rcu_torture_cleanup(void)
68254
68255 if (cur_ops->cleanup)
68256 cur_ops->cleanup();
68257- if (atomic_read(&n_rcu_torture_error))
68258+ if (atomic_read_unchecked(&n_rcu_torture_error))
68259 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
68260 else
68261 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
68262@@ -1664,17 +1664,17 @@ rcu_torture_init(void)
68263
68264 rcu_torture_current = NULL;
68265 rcu_torture_current_version = 0;
68266- atomic_set(&n_rcu_torture_alloc, 0);
68267- atomic_set(&n_rcu_torture_alloc_fail, 0);
68268- atomic_set(&n_rcu_torture_free, 0);
68269- atomic_set(&n_rcu_torture_mberror, 0);
68270- atomic_set(&n_rcu_torture_error, 0);
68271+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
68272+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
68273+ atomic_set_unchecked(&n_rcu_torture_free, 0);
68274+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
68275+ atomic_set_unchecked(&n_rcu_torture_error, 0);
68276 n_rcu_torture_boost_ktrerror = 0;
68277 n_rcu_torture_boost_rterror = 0;
68278 n_rcu_torture_boost_failure = 0;
68279 n_rcu_torture_boosts = 0;
68280 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
68281- atomic_set(&rcu_torture_wcount[i], 0);
68282+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
68283 for_each_possible_cpu(cpu) {
68284 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68285 per_cpu(rcu_torture_count, cpu)[i] = 0;
68286diff --git a/kernel/rcutree.c b/kernel/rcutree.c
68287index 6c4a672..70f3202 100644
68288--- a/kernel/rcutree.c
68289+++ b/kernel/rcutree.c
68290@@ -363,9 +363,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
68291 rcu_prepare_for_idle(smp_processor_id());
68292 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68293 smp_mb__before_atomic_inc(); /* See above. */
68294- atomic_inc(&rdtp->dynticks);
68295+ atomic_inc_unchecked(&rdtp->dynticks);
68296 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
68297- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68298+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68299 }
68300
68301 /**
68302@@ -438,10 +438,10 @@ void rcu_irq_exit(void)
68303 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
68304 {
68305 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
68306- atomic_inc(&rdtp->dynticks);
68307+ atomic_inc_unchecked(&rdtp->dynticks);
68308 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68309 smp_mb__after_atomic_inc(); /* See above. */
68310- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68311+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68312 rcu_cleanup_after_idle(smp_processor_id());
68313 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
68314 if (!is_idle_task(current)) {
68315@@ -531,14 +531,14 @@ void rcu_nmi_enter(void)
68316 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
68317
68318 if (rdtp->dynticks_nmi_nesting == 0 &&
68319- (atomic_read(&rdtp->dynticks) & 0x1))
68320+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
68321 return;
68322 rdtp->dynticks_nmi_nesting++;
68323 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
68324- atomic_inc(&rdtp->dynticks);
68325+ atomic_inc_unchecked(&rdtp->dynticks);
68326 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68327 smp_mb__after_atomic_inc(); /* See above. */
68328- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68329+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68330 }
68331
68332 /**
68333@@ -557,9 +557,9 @@ void rcu_nmi_exit(void)
68334 return;
68335 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68336 smp_mb__before_atomic_inc(); /* See above. */
68337- atomic_inc(&rdtp->dynticks);
68338+ atomic_inc_unchecked(&rdtp->dynticks);
68339 smp_mb__after_atomic_inc(); /* Force delay to next write. */
68340- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68341+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68342 }
68343
68344 #ifdef CONFIG_PROVE_RCU
68345@@ -575,7 +575,7 @@ int rcu_is_cpu_idle(void)
68346 int ret;
68347
68348 preempt_disable();
68349- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68350+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68351 preempt_enable();
68352 return ret;
68353 }
68354@@ -604,7 +604,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
68355 */
68356 static int dyntick_save_progress_counter(struct rcu_data *rdp)
68357 {
68358- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
68359+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68360 return (rdp->dynticks_snap & 0x1) == 0;
68361 }
68362
68363@@ -619,7 +619,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
68364 unsigned int curr;
68365 unsigned int snap;
68366
68367- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
68368+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68369 snap = (unsigned int)rdp->dynticks_snap;
68370
68371 /*
68372@@ -1667,7 +1667,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
68373 /*
68374 * Do RCU core processing for the current CPU.
68375 */
68376-static void rcu_process_callbacks(struct softirq_action *unused)
68377+static void rcu_process_callbacks(void)
68378 {
68379 trace_rcu_utilization("Start RCU core");
68380 __rcu_process_callbacks(&rcu_sched_state,
68381@@ -2030,7 +2030,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
68382 rdp->qlen = 0;
68383 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
68384 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
68385- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
68386+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
68387 rdp->cpu = cpu;
68388 rdp->rsp = rsp;
68389 raw_spin_unlock_irqrestore(&rnp->lock, flags);
68390@@ -2058,8 +2058,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
68391 rdp->n_force_qs_snap = rsp->n_force_qs;
68392 rdp->blimit = blimit;
68393 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
68394- atomic_set(&rdp->dynticks->dynticks,
68395- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
68396+ atomic_set_unchecked(&rdp->dynticks->dynticks,
68397+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
68398 rcu_prepare_for_idle_init(cpu);
68399 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
68400
68401diff --git a/kernel/rcutree.h b/kernel/rcutree.h
68402index fddff92..2c08359 100644
68403--- a/kernel/rcutree.h
68404+++ b/kernel/rcutree.h
68405@@ -87,7 +87,7 @@ struct rcu_dynticks {
68406 long long dynticks_nesting; /* Track irq/process nesting level. */
68407 /* Process level is worth LLONG_MAX/2. */
68408 int dynticks_nmi_nesting; /* Track NMI nesting level. */
68409- atomic_t dynticks; /* Even value for idle, else odd. */
68410+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
68411 };
68412
68413 /* RCU's kthread states for tracing. */
68414diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
68415index 8bb35d7..6ea0a463 100644
68416--- a/kernel/rcutree_plugin.h
68417+++ b/kernel/rcutree_plugin.h
68418@@ -850,7 +850,7 @@ void synchronize_rcu_expedited(void)
68419
68420 /* Clean up and exit. */
68421 smp_mb(); /* ensure expedited GP seen before counter increment. */
68422- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
68423+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
68424 unlock_mb_ret:
68425 mutex_unlock(&sync_rcu_preempt_exp_mutex);
68426 mb_ret:
68427@@ -1833,8 +1833,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
68428
68429 #else /* #ifndef CONFIG_SMP */
68430
68431-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
68432-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
68433+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
68434+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
68435
68436 static int synchronize_sched_expedited_cpu_stop(void *data)
68437 {
68438@@ -1889,7 +1889,7 @@ void synchronize_sched_expedited(void)
68439 int firstsnap, s, snap, trycount = 0;
68440
68441 /* Note that atomic_inc_return() implies full memory barrier. */
68442- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
68443+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
68444 get_online_cpus();
68445
68446 /*
68447@@ -1910,7 +1910,7 @@ void synchronize_sched_expedited(void)
68448 }
68449
68450 /* Check to see if someone else did our work for us. */
68451- s = atomic_read(&sync_sched_expedited_done);
68452+ s = atomic_read_unchecked(&sync_sched_expedited_done);
68453 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
68454 smp_mb(); /* ensure test happens before caller kfree */
68455 return;
68456@@ -1925,7 +1925,7 @@ void synchronize_sched_expedited(void)
68457 * grace period works for us.
68458 */
68459 get_online_cpus();
68460- snap = atomic_read(&sync_sched_expedited_started);
68461+ snap = atomic_read_unchecked(&sync_sched_expedited_started);
68462 smp_mb(); /* ensure read is before try_stop_cpus(). */
68463 }
68464
68465@@ -1936,12 +1936,12 @@ void synchronize_sched_expedited(void)
68466 * than we did beat us to the punch.
68467 */
68468 do {
68469- s = atomic_read(&sync_sched_expedited_done);
68470+ s = atomic_read_unchecked(&sync_sched_expedited_done);
68471 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
68472 smp_mb(); /* ensure test happens before caller kfree */
68473 break;
68474 }
68475- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
68476+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
68477
68478 put_online_cpus();
68479 }
68480diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
68481index 654cfe6..c0b28e2 100644
68482--- a/kernel/rcutree_trace.c
68483+++ b/kernel/rcutree_trace.c
68484@@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
68485 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
68486 rdp->qs_pending);
68487 seq_printf(m, " dt=%d/%llx/%d df=%lu",
68488- atomic_read(&rdp->dynticks->dynticks),
68489+ atomic_read_unchecked(&rdp->dynticks->dynticks),
68490 rdp->dynticks->dynticks_nesting,
68491 rdp->dynticks->dynticks_nmi_nesting,
68492 rdp->dynticks_fqs);
68493@@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
68494 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
68495 rdp->qs_pending);
68496 seq_printf(m, ",%d,%llx,%d,%lu",
68497- atomic_read(&rdp->dynticks->dynticks),
68498+ atomic_read_unchecked(&rdp->dynticks->dynticks),
68499 rdp->dynticks->dynticks_nesting,
68500 rdp->dynticks->dynticks_nmi_nesting,
68501 rdp->dynticks_fqs);
68502diff --git a/kernel/resource.c b/kernel/resource.c
68503index 7640b3a..5879283 100644
68504--- a/kernel/resource.c
68505+++ b/kernel/resource.c
68506@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
68507
68508 static int __init ioresources_init(void)
68509 {
68510+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68511+#ifdef CONFIG_GRKERNSEC_PROC_USER
68512+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
68513+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
68514+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68515+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
68516+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
68517+#endif
68518+#else
68519 proc_create("ioports", 0, NULL, &proc_ioports_operations);
68520 proc_create("iomem", 0, NULL, &proc_iomem_operations);
68521+#endif
68522 return 0;
68523 }
68524 __initcall(ioresources_init);
68525diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
68526index 98ec494..4241d6d 100644
68527--- a/kernel/rtmutex-tester.c
68528+++ b/kernel/rtmutex-tester.c
68529@@ -20,7 +20,7 @@
68530 #define MAX_RT_TEST_MUTEXES 8
68531
68532 static spinlock_t rttest_lock;
68533-static atomic_t rttest_event;
68534+static atomic_unchecked_t rttest_event;
68535
68536 struct test_thread_data {
68537 int opcode;
68538@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68539
68540 case RTTEST_LOCKCONT:
68541 td->mutexes[td->opdata] = 1;
68542- td->event = atomic_add_return(1, &rttest_event);
68543+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68544 return 0;
68545
68546 case RTTEST_RESET:
68547@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68548 return 0;
68549
68550 case RTTEST_RESETEVENT:
68551- atomic_set(&rttest_event, 0);
68552+ atomic_set_unchecked(&rttest_event, 0);
68553 return 0;
68554
68555 default:
68556@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68557 return ret;
68558
68559 td->mutexes[id] = 1;
68560- td->event = atomic_add_return(1, &rttest_event);
68561+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68562 rt_mutex_lock(&mutexes[id]);
68563- td->event = atomic_add_return(1, &rttest_event);
68564+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68565 td->mutexes[id] = 4;
68566 return 0;
68567
68568@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68569 return ret;
68570
68571 td->mutexes[id] = 1;
68572- td->event = atomic_add_return(1, &rttest_event);
68573+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68574 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
68575- td->event = atomic_add_return(1, &rttest_event);
68576+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68577 td->mutexes[id] = ret ? 0 : 4;
68578 return ret ? -EINTR : 0;
68579
68580@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68581 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
68582 return ret;
68583
68584- td->event = atomic_add_return(1, &rttest_event);
68585+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68586 rt_mutex_unlock(&mutexes[id]);
68587- td->event = atomic_add_return(1, &rttest_event);
68588+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68589 td->mutexes[id] = 0;
68590 return 0;
68591
68592@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68593 break;
68594
68595 td->mutexes[dat] = 2;
68596- td->event = atomic_add_return(1, &rttest_event);
68597+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68598 break;
68599
68600 default:
68601@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68602 return;
68603
68604 td->mutexes[dat] = 3;
68605- td->event = atomic_add_return(1, &rttest_event);
68606+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68607 break;
68608
68609 case RTTEST_LOCKNOWAIT:
68610@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68611 return;
68612
68613 td->mutexes[dat] = 1;
68614- td->event = atomic_add_return(1, &rttest_event);
68615+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68616 return;
68617
68618 default:
68619diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
68620index e8a1f83..363d17d 100644
68621--- a/kernel/sched/auto_group.c
68622+++ b/kernel/sched/auto_group.c
68623@@ -11,7 +11,7 @@
68624
68625 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
68626 static struct autogroup autogroup_default;
68627-static atomic_t autogroup_seq_nr;
68628+static atomic_unchecked_t autogroup_seq_nr;
68629
68630 void __init autogroup_init(struct task_struct *init_task)
68631 {
68632@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
68633
68634 kref_init(&ag->kref);
68635 init_rwsem(&ag->lock);
68636- ag->id = atomic_inc_return(&autogroup_seq_nr);
68637+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
68638 ag->tg = tg;
68639 #ifdef CONFIG_RT_GROUP_SCHED
68640 /*
68641diff --git a/kernel/sched/core.c b/kernel/sched/core.c
68642index b342f57..00324a0 100644
68643--- a/kernel/sched/core.c
68644+++ b/kernel/sched/core.c
68645@@ -3143,6 +3143,19 @@ pick_next_task(struct rq *rq)
68646 BUG(); /* the idle class will always have a runnable task */
68647 }
68648
68649+#ifdef CONFIG_GRKERNSEC_SETXID
68650+extern void gr_delayed_cred_worker(void);
68651+static inline void gr_cred_schedule(void)
68652+{
68653+ if (unlikely(current->delayed_cred))
68654+ gr_delayed_cred_worker();
68655+}
68656+#else
68657+static inline void gr_cred_schedule(void)
68658+{
68659+}
68660+#endif
68661+
68662 /*
68663 * __schedule() is the main scheduler function.
68664 */
68665@@ -3162,6 +3175,8 @@ need_resched:
68666
68667 schedule_debug(prev);
68668
68669+ gr_cred_schedule();
68670+
68671 if (sched_feat(HRTICK))
68672 hrtick_clear(rq);
68673
68674@@ -3852,6 +3867,8 @@ int can_nice(const struct task_struct *p, const int nice)
68675 /* convert nice value [19,-20] to rlimit style value [1,40] */
68676 int nice_rlim = 20 - nice;
68677
68678+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
68679+
68680 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
68681 capable(CAP_SYS_NICE));
68682 }
68683@@ -3885,7 +3902,8 @@ SYSCALL_DEFINE1(nice, int, increment)
68684 if (nice > 19)
68685 nice = 19;
68686
68687- if (increment < 0 && !can_nice(current, nice))
68688+ if (increment < 0 && (!can_nice(current, nice) ||
68689+ gr_handle_chroot_nice()))
68690 return -EPERM;
68691
68692 retval = security_task_setnice(current, nice);
68693@@ -4042,6 +4060,7 @@ recheck:
68694 unsigned long rlim_rtprio =
68695 task_rlimit(p, RLIMIT_RTPRIO);
68696
68697+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
68698 /* can't set/change the rt policy */
68699 if (policy != p->policy && !rlim_rtprio)
68700 return -EPERM;
68701diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
68702index aca16b8..8e3acc4 100644
68703--- a/kernel/sched/fair.c
68704+++ b/kernel/sched/fair.c
68705@@ -5147,7 +5147,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
68706 * run_rebalance_domains is triggered when needed from the scheduler tick.
68707 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
68708 */
68709-static void run_rebalance_domains(struct softirq_action *h)
68710+static void run_rebalance_domains(void)
68711 {
68712 int this_cpu = smp_processor_id();
68713 struct rq *this_rq = cpu_rq(this_cpu);
68714diff --git a/kernel/signal.c b/kernel/signal.c
68715index c73c428..7040057 100644
68716--- a/kernel/signal.c
68717+++ b/kernel/signal.c
68718@@ -46,12 +46,12 @@ static struct kmem_cache *sigqueue_cachep;
68719
68720 int print_fatal_signals __read_mostly;
68721
68722-static void __user *sig_handler(struct task_struct *t, int sig)
68723+static __sighandler_t sig_handler(struct task_struct *t, int sig)
68724 {
68725 return t->sighand->action[sig - 1].sa.sa_handler;
68726 }
68727
68728-static int sig_handler_ignored(void __user *handler, int sig)
68729+static int sig_handler_ignored(__sighandler_t handler, int sig)
68730 {
68731 /* Is it explicitly or implicitly ignored? */
68732 return handler == SIG_IGN ||
68733@@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
68734 static int sig_task_ignored(struct task_struct *t, int sig,
68735 int from_ancestor_ns)
68736 {
68737- void __user *handler;
68738+ __sighandler_t handler;
68739
68740 handler = sig_handler(t, sig);
68741
68742@@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
68743 atomic_inc(&user->sigpending);
68744 rcu_read_unlock();
68745
68746+ if (!override_rlimit)
68747+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
68748+
68749 if (override_rlimit ||
68750 atomic_read(&user->sigpending) <=
68751 task_rlimit(t, RLIMIT_SIGPENDING)) {
68752@@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
68753
68754 int unhandled_signal(struct task_struct *tsk, int sig)
68755 {
68756- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
68757+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
68758 if (is_global_init(tsk))
68759 return 1;
68760 if (handler != SIG_IGN && handler != SIG_DFL)
68761@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
68762 }
68763 }
68764
68765+ /* allow glibc communication via tgkill to other threads in our
68766+ thread group */
68767+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
68768+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
68769+ && gr_handle_signal(t, sig))
68770+ return -EPERM;
68771+
68772 return security_task_kill(t, info, sig, 0);
68773 }
68774
68775@@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68776 return send_signal(sig, info, p, 1);
68777 }
68778
68779-static int
68780+int
68781 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68782 {
68783 return send_signal(sig, info, t, 0);
68784@@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68785 unsigned long int flags;
68786 int ret, blocked, ignored;
68787 struct k_sigaction *action;
68788+ int is_unhandled = 0;
68789
68790 spin_lock_irqsave(&t->sighand->siglock, flags);
68791 action = &t->sighand->action[sig-1];
68792@@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68793 }
68794 if (action->sa.sa_handler == SIG_DFL)
68795 t->signal->flags &= ~SIGNAL_UNKILLABLE;
68796+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
68797+ is_unhandled = 1;
68798 ret = specific_send_sig_info(sig, info, t);
68799 spin_unlock_irqrestore(&t->sighand->siglock, flags);
68800
68801+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
68802+ normal operation */
68803+ if (is_unhandled) {
68804+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
68805+ gr_handle_crash(t, sig);
68806+ }
68807+
68808 return ret;
68809 }
68810
68811@@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68812 ret = check_kill_permission(sig, info, p);
68813 rcu_read_unlock();
68814
68815- if (!ret && sig)
68816+ if (!ret && sig) {
68817 ret = do_send_sig_info(sig, info, p, true);
68818+ if (!ret)
68819+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
68820+ }
68821
68822 return ret;
68823 }
68824@@ -2820,7 +2843,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
68825 int error = -ESRCH;
68826
68827 rcu_read_lock();
68828- p = find_task_by_vpid(pid);
68829+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68830+ /* allow glibc communication via tgkill to other threads in our
68831+ thread group */
68832+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68833+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
68834+ p = find_task_by_vpid_unrestricted(pid);
68835+ else
68836+#endif
68837+ p = find_task_by_vpid(pid);
68838 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68839 error = check_kill_permission(sig, info, p);
68840 /*
68841diff --git a/kernel/smp.c b/kernel/smp.c
68842index db197d6..17aef0b 100644
68843--- a/kernel/smp.c
68844+++ b/kernel/smp.c
68845@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
68846 }
68847 EXPORT_SYMBOL(smp_call_function);
68848
68849-void ipi_call_lock(void)
68850+void ipi_call_lock(void) __acquires(call_function.lock)
68851 {
68852 raw_spin_lock(&call_function.lock);
68853 }
68854
68855-void ipi_call_unlock(void)
68856+void ipi_call_unlock(void) __releases(call_function.lock)
68857 {
68858 raw_spin_unlock(&call_function.lock);
68859 }
68860
68861-void ipi_call_lock_irq(void)
68862+void ipi_call_lock_irq(void) __acquires(call_function.lock)
68863 {
68864 raw_spin_lock_irq(&call_function.lock);
68865 }
68866
68867-void ipi_call_unlock_irq(void)
68868+void ipi_call_unlock_irq(void) __releases(call_function.lock)
68869 {
68870 raw_spin_unlock_irq(&call_function.lock);
68871 }
68872diff --git a/kernel/softirq.c b/kernel/softirq.c
68873index 4eb3a0f..6f1fa81 100644
68874--- a/kernel/softirq.c
68875+++ b/kernel/softirq.c
68876@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
68877
68878 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
68879
68880-char *softirq_to_name[NR_SOFTIRQS] = {
68881+const char * const softirq_to_name[NR_SOFTIRQS] = {
68882 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
68883 "TASKLET", "SCHED", "HRTIMER", "RCU"
68884 };
68885@@ -235,7 +235,7 @@ restart:
68886 kstat_incr_softirqs_this_cpu(vec_nr);
68887
68888 trace_softirq_entry(vec_nr);
68889- h->action(h);
68890+ h->action();
68891 trace_softirq_exit(vec_nr);
68892 if (unlikely(prev_count != preempt_count())) {
68893 printk(KERN_ERR "huh, entered softirq %u %s %p"
68894@@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
68895 local_irq_restore(flags);
68896 }
68897
68898-void open_softirq(int nr, void (*action)(struct softirq_action *))
68899+void open_softirq(int nr, void (*action)(void))
68900 {
68901- softirq_vec[nr].action = action;
68902+ pax_open_kernel();
68903+ *(void **)&softirq_vec[nr].action = action;
68904+ pax_close_kernel();
68905 }
68906
68907 /*
68908@@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68909
68910 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68911
68912-static void tasklet_action(struct softirq_action *a)
68913+static void tasklet_action(void)
68914 {
68915 struct tasklet_struct *list;
68916
68917@@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
68918 }
68919 }
68920
68921-static void tasklet_hi_action(struct softirq_action *a)
68922+static void tasklet_hi_action(void)
68923 {
68924 struct tasklet_struct *list;
68925
68926diff --git a/kernel/sys.c b/kernel/sys.c
68927index 888d227..f04b318 100644
68928--- a/kernel/sys.c
68929+++ b/kernel/sys.c
68930@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68931 error = -EACCES;
68932 goto out;
68933 }
68934+
68935+ if (gr_handle_chroot_setpriority(p, niceval)) {
68936+ error = -EACCES;
68937+ goto out;
68938+ }
68939+
68940 no_nice = security_task_setnice(p, niceval);
68941 if (no_nice) {
68942 error = no_nice;
68943@@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68944 goto error;
68945 }
68946
68947+ if (gr_check_group_change(new->gid, new->egid, -1))
68948+ goto error;
68949+
68950 if (rgid != (gid_t) -1 ||
68951 (egid != (gid_t) -1 && egid != old->gid))
68952 new->sgid = new->egid;
68953@@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
68954 old = current_cred();
68955
68956 retval = -EPERM;
68957+
68958+ if (gr_check_group_change(gid, gid, gid))
68959+ goto error;
68960+
68961 if (nsown_capable(CAP_SETGID))
68962 new->gid = new->egid = new->sgid = new->fsgid = gid;
68963 else if (gid == old->gid || gid == old->sgid)
68964@@ -618,7 +631,7 @@ error:
68965 /*
68966 * change the user struct in a credentials set to match the new UID
68967 */
68968-static int set_user(struct cred *new)
68969+int set_user(struct cred *new)
68970 {
68971 struct user_struct *new_user;
68972
68973@@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68974 goto error;
68975 }
68976
68977+ if (gr_check_user_change(new->uid, new->euid, -1))
68978+ goto error;
68979+
68980 if (new->uid != old->uid) {
68981 retval = set_user(new);
68982 if (retval < 0)
68983@@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68984 old = current_cred();
68985
68986 retval = -EPERM;
68987+
68988+ if (gr_check_crash_uid(uid))
68989+ goto error;
68990+ if (gr_check_user_change(uid, uid, uid))
68991+ goto error;
68992+
68993 if (nsown_capable(CAP_SETUID)) {
68994 new->suid = new->uid = uid;
68995 if (uid != old->uid) {
68996@@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68997 goto error;
68998 }
68999
69000+ if (gr_check_user_change(ruid, euid, -1))
69001+ goto error;
69002+
69003 if (ruid != (uid_t) -1) {
69004 new->uid = ruid;
69005 if (ruid != old->uid) {
69006@@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
69007 goto error;
69008 }
69009
69010+ if (gr_check_group_change(rgid, egid, -1))
69011+ goto error;
69012+
69013 if (rgid != (gid_t) -1)
69014 new->gid = rgid;
69015 if (egid != (gid_t) -1)
69016@@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
69017 old = current_cred();
69018 old_fsuid = old->fsuid;
69019
69020+ if (gr_check_user_change(-1, -1, uid))
69021+ goto error;
69022+
69023 if (uid == old->uid || uid == old->euid ||
69024 uid == old->suid || uid == old->fsuid ||
69025 nsown_capable(CAP_SETUID)) {
69026@@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
69027 }
69028 }
69029
69030+error:
69031 abort_creds(new);
69032 return old_fsuid;
69033
69034@@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
69035 if (gid == old->gid || gid == old->egid ||
69036 gid == old->sgid || gid == old->fsgid ||
69037 nsown_capable(CAP_SETGID)) {
69038+ if (gr_check_group_change(-1, -1, gid))
69039+ goto error;
69040+
69041 if (gid != old_fsgid) {
69042 new->fsgid = gid;
69043 goto change_okay;
69044 }
69045 }
69046
69047+error:
69048 abort_creds(new);
69049 return old_fsgid;
69050
69051@@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
69052 }
69053 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
69054 snprintf(buf, len, "2.6.%u%s", v, rest);
69055- ret = copy_to_user(release, buf, len);
69056+ if (len > sizeof(buf))
69057+ ret = -EFAULT;
69058+ else
69059+ ret = copy_to_user(release, buf, len);
69060 }
69061 return ret;
69062 }
69063@@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
69064 return -EFAULT;
69065
69066 down_read(&uts_sem);
69067- error = __copy_to_user(&name->sysname, &utsname()->sysname,
69068+ error = __copy_to_user(name->sysname, &utsname()->sysname,
69069 __OLD_UTS_LEN);
69070 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
69071- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
69072+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
69073 __OLD_UTS_LEN);
69074 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
69075- error |= __copy_to_user(&name->release, &utsname()->release,
69076+ error |= __copy_to_user(name->release, &utsname()->release,
69077 __OLD_UTS_LEN);
69078 error |= __put_user(0, name->release + __OLD_UTS_LEN);
69079- error |= __copy_to_user(&name->version, &utsname()->version,
69080+ error |= __copy_to_user(name->version, &utsname()->version,
69081 __OLD_UTS_LEN);
69082 error |= __put_user(0, name->version + __OLD_UTS_LEN);
69083- error |= __copy_to_user(&name->machine, &utsname()->machine,
69084+ error |= __copy_to_user(name->machine, &utsname()->machine,
69085 __OLD_UTS_LEN);
69086 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
69087 up_read(&uts_sem);
69088@@ -1838,7 +1877,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
69089 error = get_dumpable(me->mm);
69090 break;
69091 case PR_SET_DUMPABLE:
69092- if (arg2 < 0 || arg2 > 1) {
69093+ if (arg2 > 1) {
69094 error = -EINVAL;
69095 break;
69096 }
69097diff --git a/kernel/sysctl.c b/kernel/sysctl.c
69098index f487f25..9056a9e 100644
69099--- a/kernel/sysctl.c
69100+++ b/kernel/sysctl.c
69101@@ -86,6 +86,13 @@
69102
69103
69104 #if defined(CONFIG_SYSCTL)
69105+#include <linux/grsecurity.h>
69106+#include <linux/grinternal.h>
69107+
69108+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
69109+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
69110+ const int op);
69111+extern int gr_handle_chroot_sysctl(const int op);
69112
69113 /* External variables not in a header file. */
69114 extern int sysctl_overcommit_memory;
69115@@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
69116 }
69117
69118 #endif
69119+extern struct ctl_table grsecurity_table[];
69120
69121 static struct ctl_table root_table[];
69122 static struct ctl_table_root sysctl_table_root;
69123@@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
69124 int sysctl_legacy_va_layout;
69125 #endif
69126
69127+#ifdef CONFIG_PAX_SOFTMODE
69128+static ctl_table pax_table[] = {
69129+ {
69130+ .procname = "softmode",
69131+ .data = &pax_softmode,
69132+ .maxlen = sizeof(unsigned int),
69133+ .mode = 0600,
69134+ .proc_handler = &proc_dointvec,
69135+ },
69136+
69137+ { }
69138+};
69139+#endif
69140+
69141 /* The default sysctl tables: */
69142
69143 static struct ctl_table root_table[] = {
69144@@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
69145 #endif
69146
69147 static struct ctl_table kern_table[] = {
69148+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
69149+ {
69150+ .procname = "grsecurity",
69151+ .mode = 0500,
69152+ .child = grsecurity_table,
69153+ },
69154+#endif
69155+
69156+#ifdef CONFIG_PAX_SOFTMODE
69157+ {
69158+ .procname = "pax",
69159+ .mode = 0500,
69160+ .child = pax_table,
69161+ },
69162+#endif
69163+
69164 {
69165 .procname = "sched_child_runs_first",
69166 .data = &sysctl_sched_child_runs_first,
69167@@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
69168 .data = &modprobe_path,
69169 .maxlen = KMOD_PATH_LEN,
69170 .mode = 0644,
69171- .proc_handler = proc_dostring,
69172+ .proc_handler = proc_dostring_modpriv,
69173 },
69174 {
69175 .procname = "modules_disabled",
69176@@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
69177 .extra1 = &zero,
69178 .extra2 = &one,
69179 },
69180+#endif
69181 {
69182 .procname = "kptr_restrict",
69183 .data = &kptr_restrict,
69184 .maxlen = sizeof(int),
69185 .mode = 0644,
69186 .proc_handler = proc_dmesg_restrict,
69187+#ifdef CONFIG_GRKERNSEC_HIDESYM
69188+ .extra1 = &two,
69189+#else
69190 .extra1 = &zero,
69191+#endif
69192 .extra2 = &two,
69193 },
69194-#endif
69195 {
69196 .procname = "ngroups_max",
69197 .data = &ngroups_max,
69198@@ -1225,6 +1267,13 @@ static struct ctl_table vm_table[] = {
69199 .proc_handler = proc_dointvec_minmax,
69200 .extra1 = &zero,
69201 },
69202+ {
69203+ .procname = "heap_stack_gap",
69204+ .data = &sysctl_heap_stack_gap,
69205+ .maxlen = sizeof(sysctl_heap_stack_gap),
69206+ .mode = 0644,
69207+ .proc_handler = proc_doulongvec_minmax,
69208+ },
69209 #else
69210 {
69211 .procname = "nr_trim_pages",
69212@@ -1729,6 +1778,17 @@ static int test_perm(int mode, int op)
69213 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
69214 {
69215 int mode;
69216+ int error;
69217+
69218+ if (table->parent != NULL && table->parent->procname != NULL &&
69219+ table->procname != NULL &&
69220+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
69221+ return -EACCES;
69222+ if (gr_handle_chroot_sysctl(op))
69223+ return -EACCES;
69224+ error = gr_handle_sysctl(table, op);
69225+ if (error)
69226+ return error;
69227
69228 if (root->permissions)
69229 mode = root->permissions(root, current->nsproxy, table);
69230@@ -2133,6 +2193,16 @@ int proc_dostring(struct ctl_table *table, int write,
69231 buffer, lenp, ppos);
69232 }
69233
69234+int proc_dostring_modpriv(struct ctl_table *table, int write,
69235+ void __user *buffer, size_t *lenp, loff_t *ppos)
69236+{
69237+ if (write && !capable(CAP_SYS_MODULE))
69238+ return -EPERM;
69239+
69240+ return _proc_do_string(table->data, table->maxlen, write,
69241+ buffer, lenp, ppos);
69242+}
69243+
69244 static size_t proc_skip_spaces(char **buf)
69245 {
69246 size_t ret;
69247@@ -2238,6 +2308,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
69248 len = strlen(tmp);
69249 if (len > *size)
69250 len = *size;
69251+ if (len > sizeof(tmp))
69252+ len = sizeof(tmp);
69253 if (copy_to_user(*buf, tmp, len))
69254 return -EFAULT;
69255 *size -= len;
69256@@ -2554,8 +2626,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
69257 *i = val;
69258 } else {
69259 val = convdiv * (*i) / convmul;
69260- if (!first)
69261+ if (!first) {
69262 err = proc_put_char(&buffer, &left, '\t');
69263+ if (err)
69264+ break;
69265+ }
69266 err = proc_put_long(&buffer, &left, val, false);
69267 if (err)
69268 break;
69269@@ -2950,6 +3025,12 @@ int proc_dostring(struct ctl_table *table, int write,
69270 return -ENOSYS;
69271 }
69272
69273+int proc_dostring_modpriv(struct ctl_table *table, int write,
69274+ void __user *buffer, size_t *lenp, loff_t *ppos)
69275+{
69276+ return -ENOSYS;
69277+}
69278+
69279 int proc_dointvec(struct ctl_table *table, int write,
69280 void __user *buffer, size_t *lenp, loff_t *ppos)
69281 {
69282@@ -3006,6 +3087,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
69283 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
69284 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
69285 EXPORT_SYMBOL(proc_dostring);
69286+EXPORT_SYMBOL(proc_dostring_modpriv);
69287 EXPORT_SYMBOL(proc_doulongvec_minmax);
69288 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
69289 EXPORT_SYMBOL(register_sysctl_table);
69290diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
69291index a650694..aaeeb20 100644
69292--- a/kernel/sysctl_binary.c
69293+++ b/kernel/sysctl_binary.c
69294@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
69295 int i;
69296
69297 set_fs(KERNEL_DS);
69298- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69299+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69300 set_fs(old_fs);
69301 if (result < 0)
69302 goto out_kfree;
69303@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
69304 }
69305
69306 set_fs(KERNEL_DS);
69307- result = vfs_write(file, buffer, str - buffer, &pos);
69308+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69309 set_fs(old_fs);
69310 if (result < 0)
69311 goto out_kfree;
69312@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
69313 int i;
69314
69315 set_fs(KERNEL_DS);
69316- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69317+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69318 set_fs(old_fs);
69319 if (result < 0)
69320 goto out_kfree;
69321@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
69322 }
69323
69324 set_fs(KERNEL_DS);
69325- result = vfs_write(file, buffer, str - buffer, &pos);
69326+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69327 set_fs(old_fs);
69328 if (result < 0)
69329 goto out_kfree;
69330@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
69331 int i;
69332
69333 set_fs(KERNEL_DS);
69334- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69335+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69336 set_fs(old_fs);
69337 if (result < 0)
69338 goto out;
69339@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69340 __le16 dnaddr;
69341
69342 set_fs(KERNEL_DS);
69343- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69344+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69345 set_fs(old_fs);
69346 if (result < 0)
69347 goto out;
69348@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69349 le16_to_cpu(dnaddr) & 0x3ff);
69350
69351 set_fs(KERNEL_DS);
69352- result = vfs_write(file, buf, len, &pos);
69353+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
69354 set_fs(old_fs);
69355 if (result < 0)
69356 goto out;
69357diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
69358index 362da65..ab8ef8c 100644
69359--- a/kernel/sysctl_check.c
69360+++ b/kernel/sysctl_check.c
69361@@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
69362 set_fail(&fail, table, "Directory with extra2");
69363 } else {
69364 if ((table->proc_handler == proc_dostring) ||
69365+ (table->proc_handler == proc_dostring_modpriv) ||
69366 (table->proc_handler == proc_dointvec) ||
69367 (table->proc_handler == proc_dointvec_minmax) ||
69368 (table->proc_handler == proc_dointvec_jiffies) ||
69369diff --git a/kernel/taskstats.c b/kernel/taskstats.c
69370index e660464..c8b9e67 100644
69371--- a/kernel/taskstats.c
69372+++ b/kernel/taskstats.c
69373@@ -27,9 +27,12 @@
69374 #include <linux/cgroup.h>
69375 #include <linux/fs.h>
69376 #include <linux/file.h>
69377+#include <linux/grsecurity.h>
69378 #include <net/genetlink.h>
69379 #include <linux/atomic.h>
69380
69381+extern int gr_is_taskstats_denied(int pid);
69382+
69383 /*
69384 * Maximum length of a cpumask that can be specified in
69385 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
69386@@ -556,6 +559,9 @@ err:
69387
69388 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
69389 {
69390+ if (gr_is_taskstats_denied(current->pid))
69391+ return -EACCES;
69392+
69393 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
69394 return cmd_attr_register_cpumask(info);
69395 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
69396diff --git a/kernel/time.c b/kernel/time.c
69397index 73e416d..cfc6f69 100644
69398--- a/kernel/time.c
69399+++ b/kernel/time.c
69400@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
69401 return error;
69402
69403 if (tz) {
69404+ /* we log in do_settimeofday called below, so don't log twice
69405+ */
69406+ if (!tv)
69407+ gr_log_timechange();
69408+
69409 /* SMP safe, global irq locking makes it work. */
69410 sys_tz = *tz;
69411 update_vsyscall_tz();
69412diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
69413index 8a46f5d..bbe6f9c 100644
69414--- a/kernel/time/alarmtimer.c
69415+++ b/kernel/time/alarmtimer.c
69416@@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
69417 struct platform_device *pdev;
69418 int error = 0;
69419 int i;
69420- struct k_clock alarm_clock = {
69421+ static struct k_clock alarm_clock = {
69422 .clock_getres = alarm_clock_getres,
69423 .clock_get = alarm_clock_get,
69424 .timer_create = alarm_timer_create,
69425diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
69426index fd4a7b1..fae5c2a 100644
69427--- a/kernel/time/tick-broadcast.c
69428+++ b/kernel/time/tick-broadcast.c
69429@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
69430 * then clear the broadcast bit.
69431 */
69432 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
69433- int cpu = smp_processor_id();
69434+ cpu = smp_processor_id();
69435
69436 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
69437 tick_broadcast_clear_oneshot(cpu);
69438diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
69439index 0c63581..e25dcb6 100644
69440--- a/kernel/time/timekeeping.c
69441+++ b/kernel/time/timekeeping.c
69442@@ -14,6 +14,7 @@
69443 #include <linux/init.h>
69444 #include <linux/mm.h>
69445 #include <linux/sched.h>
69446+#include <linux/grsecurity.h>
69447 #include <linux/syscore_ops.h>
69448 #include <linux/clocksource.h>
69449 #include <linux/jiffies.h>
69450@@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
69451 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
69452 return -EINVAL;
69453
69454+ gr_log_timechange();
69455+
69456 write_seqlock_irqsave(&xtime_lock, flags);
69457
69458 timekeeping_forward_now();
69459diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
69460index 3258455..f35227d 100644
69461--- a/kernel/time/timer_list.c
69462+++ b/kernel/time/timer_list.c
69463@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
69464
69465 static void print_name_offset(struct seq_file *m, void *sym)
69466 {
69467+#ifdef CONFIG_GRKERNSEC_HIDESYM
69468+ SEQ_printf(m, "<%p>", NULL);
69469+#else
69470 char symname[KSYM_NAME_LEN];
69471
69472 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
69473 SEQ_printf(m, "<%pK>", sym);
69474 else
69475 SEQ_printf(m, "%s", symname);
69476+#endif
69477 }
69478
69479 static void
69480@@ -112,7 +116,11 @@ next_one:
69481 static void
69482 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
69483 {
69484+#ifdef CONFIG_GRKERNSEC_HIDESYM
69485+ SEQ_printf(m, " .base: %p\n", NULL);
69486+#else
69487 SEQ_printf(m, " .base: %pK\n", base);
69488+#endif
69489 SEQ_printf(m, " .index: %d\n",
69490 base->index);
69491 SEQ_printf(m, " .resolution: %Lu nsecs\n",
69492@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
69493 {
69494 struct proc_dir_entry *pe;
69495
69496+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69497+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
69498+#else
69499 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
69500+#endif
69501 if (!pe)
69502 return -ENOMEM;
69503 return 0;
69504diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
69505index 0b537f2..9e71eca 100644
69506--- a/kernel/time/timer_stats.c
69507+++ b/kernel/time/timer_stats.c
69508@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
69509 static unsigned long nr_entries;
69510 static struct entry entries[MAX_ENTRIES];
69511
69512-static atomic_t overflow_count;
69513+static atomic_unchecked_t overflow_count;
69514
69515 /*
69516 * The entries are in a hash-table, for fast lookup:
69517@@ -140,7 +140,7 @@ static void reset_entries(void)
69518 nr_entries = 0;
69519 memset(entries, 0, sizeof(entries));
69520 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
69521- atomic_set(&overflow_count, 0);
69522+ atomic_set_unchecked(&overflow_count, 0);
69523 }
69524
69525 static struct entry *alloc_entry(void)
69526@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69527 if (likely(entry))
69528 entry->count++;
69529 else
69530- atomic_inc(&overflow_count);
69531+ atomic_inc_unchecked(&overflow_count);
69532
69533 out_unlock:
69534 raw_spin_unlock_irqrestore(lock, flags);
69535@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69536
69537 static void print_name_offset(struct seq_file *m, unsigned long addr)
69538 {
69539+#ifdef CONFIG_GRKERNSEC_HIDESYM
69540+ seq_printf(m, "<%p>", NULL);
69541+#else
69542 char symname[KSYM_NAME_LEN];
69543
69544 if (lookup_symbol_name(addr, symname) < 0)
69545 seq_printf(m, "<%p>", (void *)addr);
69546 else
69547 seq_printf(m, "%s", symname);
69548+#endif
69549 }
69550
69551 static int tstats_show(struct seq_file *m, void *v)
69552@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
69553
69554 seq_puts(m, "Timer Stats Version: v0.2\n");
69555 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
69556- if (atomic_read(&overflow_count))
69557+ if (atomic_read_unchecked(&overflow_count))
69558 seq_printf(m, "Overflow: %d entries\n",
69559- atomic_read(&overflow_count));
69560+ atomic_read_unchecked(&overflow_count));
69561
69562 for (i = 0; i < nr_entries; i++) {
69563 entry = entries + i;
69564@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
69565 {
69566 struct proc_dir_entry *pe;
69567
69568+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69569+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
69570+#else
69571 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
69572+#endif
69573 if (!pe)
69574 return -ENOMEM;
69575 return 0;
69576diff --git a/kernel/timer.c b/kernel/timer.c
69577index a297ffc..5e16b0b 100644
69578--- a/kernel/timer.c
69579+++ b/kernel/timer.c
69580@@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
69581 /*
69582 * This function runs timers and the timer-tq in bottom half context.
69583 */
69584-static void run_timer_softirq(struct softirq_action *h)
69585+static void run_timer_softirq(void)
69586 {
69587 struct tvec_base *base = __this_cpu_read(tvec_bases);
69588
69589diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
69590index cdea7b5..9b820d4 100644
69591--- a/kernel/trace/blktrace.c
69592+++ b/kernel/trace/blktrace.c
69593@@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
69594 struct blk_trace *bt = filp->private_data;
69595 char buf[16];
69596
69597- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
69598+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
69599
69600 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
69601 }
69602@@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
69603 return 1;
69604
69605 bt = buf->chan->private_data;
69606- atomic_inc(&bt->dropped);
69607+ atomic_inc_unchecked(&bt->dropped);
69608 return 0;
69609 }
69610
69611@@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
69612
69613 bt->dir = dir;
69614 bt->dev = dev;
69615- atomic_set(&bt->dropped, 0);
69616+ atomic_set_unchecked(&bt->dropped, 0);
69617
69618 ret = -EIO;
69619 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
69620diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
69621index 683d559..d70d914 100644
69622--- a/kernel/trace/ftrace.c
69623+++ b/kernel/trace/ftrace.c
69624@@ -1726,12 +1726,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
69625 if (unlikely(ftrace_disabled))
69626 return 0;
69627
69628+ ret = ftrace_arch_code_modify_prepare();
69629+ FTRACE_WARN_ON(ret);
69630+ if (ret)
69631+ return 0;
69632+
69633 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
69634+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
69635 if (ret) {
69636 ftrace_bug(ret, ip);
69637- return 0;
69638 }
69639- return 1;
69640+ return ret ? 0 : 1;
69641 }
69642
69643 /*
69644@@ -2843,7 +2848,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
69645
69646 int
69647 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
69648- void *data)
69649+ void *data)
69650 {
69651 struct ftrace_func_probe *entry;
69652 struct ftrace_page *pg;
69653diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
69654index a3f1bc5..5e651718 100644
69655--- a/kernel/trace/trace.c
69656+++ b/kernel/trace/trace.c
69657@@ -4254,10 +4254,9 @@ static const struct file_operations tracing_dyn_info_fops = {
69658 };
69659 #endif
69660
69661-static struct dentry *d_tracer;
69662-
69663 struct dentry *tracing_init_dentry(void)
69664 {
69665+ static struct dentry *d_tracer;
69666 static int once;
69667
69668 if (d_tracer)
69669@@ -4277,10 +4276,9 @@ struct dentry *tracing_init_dentry(void)
69670 return d_tracer;
69671 }
69672
69673-static struct dentry *d_percpu;
69674-
69675 struct dentry *tracing_dentry_percpu(void)
69676 {
69677+ static struct dentry *d_percpu;
69678 static int once;
69679 struct dentry *d_tracer;
69680
69681diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
69682index c212a7f..7b02394 100644
69683--- a/kernel/trace/trace_events.c
69684+++ b/kernel/trace/trace_events.c
69685@@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
69686 struct ftrace_module_file_ops {
69687 struct list_head list;
69688 struct module *mod;
69689- struct file_operations id;
69690- struct file_operations enable;
69691- struct file_operations format;
69692- struct file_operations filter;
69693 };
69694
69695 static struct ftrace_module_file_ops *
69696@@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
69697
69698 file_ops->mod = mod;
69699
69700- file_ops->id = ftrace_event_id_fops;
69701- file_ops->id.owner = mod;
69702-
69703- file_ops->enable = ftrace_enable_fops;
69704- file_ops->enable.owner = mod;
69705-
69706- file_ops->filter = ftrace_event_filter_fops;
69707- file_ops->filter.owner = mod;
69708-
69709- file_ops->format = ftrace_event_format_fops;
69710- file_ops->format.owner = mod;
69711+ pax_open_kernel();
69712+ *(void **)&mod->trace_id.owner = mod;
69713+ *(void **)&mod->trace_enable.owner = mod;
69714+ *(void **)&mod->trace_filter.owner = mod;
69715+ *(void **)&mod->trace_format.owner = mod;
69716+ pax_close_kernel();
69717
69718 list_add(&file_ops->list, &ftrace_module_file_list);
69719
69720@@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
69721
69722 for_each_event(call, start, end) {
69723 __trace_add_event_call(*call, mod,
69724- &file_ops->id, &file_ops->enable,
69725- &file_ops->filter, &file_ops->format);
69726+ &mod->trace_id, &mod->trace_enable,
69727+ &mod->trace_filter, &mod->trace_format);
69728 }
69729 }
69730
69731diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
69732index 00d527c..7c5b1a3 100644
69733--- a/kernel/trace/trace_kprobe.c
69734+++ b/kernel/trace/trace_kprobe.c
69735@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69736 long ret;
69737 int maxlen = get_rloc_len(*(u32 *)dest);
69738 u8 *dst = get_rloc_data(dest);
69739- u8 *src = addr;
69740+ const u8 __user *src = (const u8 __force_user *)addr;
69741 mm_segment_t old_fs = get_fs();
69742 if (!maxlen)
69743 return;
69744@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69745 pagefault_disable();
69746 do
69747 ret = __copy_from_user_inatomic(dst++, src++, 1);
69748- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
69749+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
69750 dst[-1] = '\0';
69751 pagefault_enable();
69752 set_fs(old_fs);
69753@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69754 ((u8 *)get_rloc_data(dest))[0] = '\0';
69755 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
69756 } else
69757- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
69758+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
69759 get_rloc_offs(*(u32 *)dest));
69760 }
69761 /* Return the length of string -- including null terminal byte */
69762@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
69763 set_fs(KERNEL_DS);
69764 pagefault_disable();
69765 do {
69766- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
69767+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
69768 len++;
69769 } while (c && ret == 0 && len < MAX_STRING_SIZE);
69770 pagefault_enable();
69771diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
69772index fd3c8aa..5f324a6 100644
69773--- a/kernel/trace/trace_mmiotrace.c
69774+++ b/kernel/trace/trace_mmiotrace.c
69775@@ -24,7 +24,7 @@ struct header_iter {
69776 static struct trace_array *mmio_trace_array;
69777 static bool overrun_detected;
69778 static unsigned long prev_overruns;
69779-static atomic_t dropped_count;
69780+static atomic_unchecked_t dropped_count;
69781
69782 static void mmio_reset_data(struct trace_array *tr)
69783 {
69784@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
69785
69786 static unsigned long count_overruns(struct trace_iterator *iter)
69787 {
69788- unsigned long cnt = atomic_xchg(&dropped_count, 0);
69789+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
69790 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
69791
69792 if (over > prev_overruns)
69793@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
69794 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
69795 sizeof(*entry), 0, pc);
69796 if (!event) {
69797- atomic_inc(&dropped_count);
69798+ atomic_inc_unchecked(&dropped_count);
69799 return;
69800 }
69801 entry = ring_buffer_event_data(event);
69802@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
69803 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
69804 sizeof(*entry), 0, pc);
69805 if (!event) {
69806- atomic_inc(&dropped_count);
69807+ atomic_inc_unchecked(&dropped_count);
69808 return;
69809 }
69810 entry = ring_buffer_event_data(event);
69811diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
69812index 0d6ff35..67e0ed7 100644
69813--- a/kernel/trace/trace_output.c
69814+++ b/kernel/trace/trace_output.c
69815@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
69816
69817 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
69818 if (!IS_ERR(p)) {
69819- p = mangle_path(s->buffer + s->len, p, "\n");
69820+ p = mangle_path(s->buffer + s->len, p, "\n\\");
69821 if (p) {
69822 s->len = p - s->buffer;
69823 return 1;
69824diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
69825index d4545f4..a9010a1 100644
69826--- a/kernel/trace/trace_stack.c
69827+++ b/kernel/trace/trace_stack.c
69828@@ -53,7 +53,7 @@ static inline void check_stack(void)
69829 return;
69830
69831 /* we do not handle interrupt stacks yet */
69832- if (!object_is_on_stack(&this_size))
69833+ if (!object_starts_on_stack(&this_size))
69834 return;
69835
69836 local_irq_save(flags);
69837diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69838index 209b379..7f76423 100644
69839--- a/kernel/trace/trace_workqueue.c
69840+++ b/kernel/trace/trace_workqueue.c
69841@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69842 int cpu;
69843 pid_t pid;
69844 /* Can be inserted from interrupt or user context, need to be atomic */
69845- atomic_t inserted;
69846+ atomic_unchecked_t inserted;
69847 /*
69848 * Don't need to be atomic, works are serialized in a single workqueue thread
69849 * on a single CPU.
69850@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69851 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69852 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69853 if (node->pid == wq_thread->pid) {
69854- atomic_inc(&node->inserted);
69855+ atomic_inc_unchecked(&node->inserted);
69856 goto found;
69857 }
69858 }
69859@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
69860 tsk = get_pid_task(pid, PIDTYPE_PID);
69861 if (tsk) {
69862 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69863- atomic_read(&cws->inserted), cws->executed,
69864+ atomic_read_unchecked(&cws->inserted), cws->executed,
69865 tsk->comm);
69866 put_task_struct(tsk);
69867 }
69868diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69869index 8745ac7..d144e37 100644
69870--- a/lib/Kconfig.debug
69871+++ b/lib/Kconfig.debug
69872@@ -1103,6 +1103,7 @@ config LATENCYTOP
69873 depends on DEBUG_KERNEL
69874 depends on STACKTRACE_SUPPORT
69875 depends on PROC_FS
69876+ depends on !GRKERNSEC_HIDESYM
69877 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
69878 select KALLSYMS
69879 select KALLSYMS_ALL
69880diff --git a/lib/bitmap.c b/lib/bitmap.c
69881index 0d4a127..33a06c7 100644
69882--- a/lib/bitmap.c
69883+++ b/lib/bitmap.c
69884@@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
69885 {
69886 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69887 u32 chunk;
69888- const char __user __force *ubuf = (const char __user __force *)buf;
69889+ const char __user *ubuf = (const char __force_user *)buf;
69890
69891 bitmap_zero(maskp, nmaskbits);
69892
69893@@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
69894 {
69895 if (!access_ok(VERIFY_READ, ubuf, ulen))
69896 return -EFAULT;
69897- return __bitmap_parse((const char __force *)ubuf,
69898+ return __bitmap_parse((const char __force_kernel *)ubuf,
69899 ulen, 1, maskp, nmaskbits);
69900
69901 }
69902@@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
69903 {
69904 unsigned a, b;
69905 int c, old_c, totaldigits;
69906- const char __user __force *ubuf = (const char __user __force *)buf;
69907+ const char __user *ubuf = (const char __force_user *)buf;
69908 int exp_digit, in_range;
69909
69910 totaldigits = c = 0;
69911@@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69912 {
69913 if (!access_ok(VERIFY_READ, ubuf, ulen))
69914 return -EFAULT;
69915- return __bitmap_parselist((const char __force *)ubuf,
69916+ return __bitmap_parselist((const char __force_kernel *)ubuf,
69917 ulen, 1, maskp, nmaskbits);
69918 }
69919 EXPORT_SYMBOL(bitmap_parselist_user);
69920diff --git a/lib/bug.c b/lib/bug.c
69921index a28c141..2bd3d95 100644
69922--- a/lib/bug.c
69923+++ b/lib/bug.c
69924@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69925 return BUG_TRAP_TYPE_NONE;
69926
69927 bug = find_bug(bugaddr);
69928+ if (!bug)
69929+ return BUG_TRAP_TYPE_NONE;
69930
69931 file = NULL;
69932 line = 0;
69933diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69934index 0ab9ae8..f01ceca 100644
69935--- a/lib/debugobjects.c
69936+++ b/lib/debugobjects.c
69937@@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69938 if (limit > 4)
69939 return;
69940
69941- is_on_stack = object_is_on_stack(addr);
69942+ is_on_stack = object_starts_on_stack(addr);
69943 if (is_on_stack == onstack)
69944 return;
69945
69946diff --git a/lib/devres.c b/lib/devres.c
69947index 9676617..5149e15 100644
69948--- a/lib/devres.c
69949+++ b/lib/devres.c
69950@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69951 void devm_iounmap(struct device *dev, void __iomem *addr)
69952 {
69953 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69954- (void *)addr));
69955+ (void __force *)addr));
69956 iounmap(addr);
69957 }
69958 EXPORT_SYMBOL(devm_iounmap);
69959@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
69960 {
69961 ioport_unmap(addr);
69962 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69963- devm_ioport_map_match, (void *)addr));
69964+ devm_ioport_map_match, (void __force *)addr));
69965 }
69966 EXPORT_SYMBOL(devm_ioport_unmap);
69967
69968diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69969index fea790a..ebb0e82 100644
69970--- a/lib/dma-debug.c
69971+++ b/lib/dma-debug.c
69972@@ -925,7 +925,7 @@ out:
69973
69974 static void check_for_stack(struct device *dev, void *addr)
69975 {
69976- if (object_is_on_stack(addr))
69977+ if (object_starts_on_stack(addr))
69978 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69979 "stack [addr=%p]\n", addr);
69980 }
69981diff --git a/lib/extable.c b/lib/extable.c
69982index 4cac81e..63e9b8f 100644
69983--- a/lib/extable.c
69984+++ b/lib/extable.c
69985@@ -13,6 +13,7 @@
69986 #include <linux/init.h>
69987 #include <linux/sort.h>
69988 #include <asm/uaccess.h>
69989+#include <asm/pgtable.h>
69990
69991 #ifndef ARCH_HAS_SORT_EXTABLE
69992 /*
69993@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
69994 void sort_extable(struct exception_table_entry *start,
69995 struct exception_table_entry *finish)
69996 {
69997+ pax_open_kernel();
69998 sort(start, finish - start, sizeof(struct exception_table_entry),
69999 cmp_ex, NULL);
70000+ pax_close_kernel();
70001 }
70002
70003 #ifdef CONFIG_MODULES
70004diff --git a/lib/inflate.c b/lib/inflate.c
70005index 013a761..c28f3fc 100644
70006--- a/lib/inflate.c
70007+++ b/lib/inflate.c
70008@@ -269,7 +269,7 @@ static void free(void *where)
70009 malloc_ptr = free_mem_ptr;
70010 }
70011 #else
70012-#define malloc(a) kmalloc(a, GFP_KERNEL)
70013+#define malloc(a) kmalloc((a), GFP_KERNEL)
70014 #define free(a) kfree(a)
70015 #endif
70016
70017diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
70018index bd2bea9..6b3c95e 100644
70019--- a/lib/is_single_threaded.c
70020+++ b/lib/is_single_threaded.c
70021@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
70022 struct task_struct *p, *t;
70023 bool ret;
70024
70025+ if (!mm)
70026+ return true;
70027+
70028 if (atomic_read(&task->signal->live) != 1)
70029 return false;
70030
70031diff --git a/lib/radix-tree.c b/lib/radix-tree.c
70032index dc63d08..95ae14a 100644
70033--- a/lib/radix-tree.c
70034+++ b/lib/radix-tree.c
70035@@ -78,7 +78,7 @@ struct radix_tree_preload {
70036 int nr;
70037 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
70038 };
70039-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
70040+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
70041
70042 static inline void *ptr_to_indirect(void *ptr)
70043 {
70044diff --git a/lib/vsprintf.c b/lib/vsprintf.c
70045index 38e612e..4fb99a8 100644
70046--- a/lib/vsprintf.c
70047+++ b/lib/vsprintf.c
70048@@ -16,6 +16,9 @@
70049 * - scnprintf and vscnprintf
70050 */
70051
70052+#ifdef CONFIG_GRKERNSEC_HIDESYM
70053+#define __INCLUDED_BY_HIDESYM 1
70054+#endif
70055 #include <stdarg.h>
70056 #include <linux/module.h>
70057 #include <linux/types.h>
70058@@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
70059 char sym[KSYM_SYMBOL_LEN];
70060 if (ext == 'B')
70061 sprint_backtrace(sym, value);
70062- else if (ext != 'f' && ext != 's')
70063+ else if (ext != 'f' && ext != 's' && ext != 'a')
70064 sprint_symbol(sym, value);
70065 else
70066 kallsyms_lookup(value, NULL, NULL, NULL, sym);
70067@@ -789,7 +792,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
70068 return number(buf, end, *(const netdev_features_t *)addr, spec);
70069 }
70070
70071+#ifdef CONFIG_GRKERNSEC_HIDESYM
70072+int kptr_restrict __read_mostly = 2;
70073+#else
70074 int kptr_restrict __read_mostly;
70075+#endif
70076
70077 /*
70078 * Show a '%p' thing. A kernel extension is that the '%p' is followed
70079@@ -803,6 +810,8 @@ int kptr_restrict __read_mostly;
70080 * - 'S' For symbolic direct pointers with offset
70081 * - 's' For symbolic direct pointers without offset
70082 * - 'B' For backtraced symbolic direct pointers with offset
70083+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
70084+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
70085 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
70086 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
70087 * - 'M' For a 6-byte MAC address, it prints the address in the
70088@@ -848,12 +857,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70089 {
70090 if (!ptr && *fmt != 'K') {
70091 /*
70092- * Print (null) with the same width as a pointer so it makes
70093+ * Print (nil) with the same width as a pointer so it makes
70094 * tabular output look nice.
70095 */
70096 if (spec.field_width == -1)
70097 spec.field_width = 2 * sizeof(void *);
70098- return string(buf, end, "(null)", spec);
70099+ return string(buf, end, "(nil)", spec);
70100 }
70101
70102 switch (*fmt) {
70103@@ -863,6 +872,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70104 /* Fallthrough */
70105 case 'S':
70106 case 's':
70107+#ifdef CONFIG_GRKERNSEC_HIDESYM
70108+ break;
70109+#else
70110+ return symbol_string(buf, end, ptr, spec, *fmt);
70111+#endif
70112+ case 'A':
70113+ case 'a':
70114 case 'B':
70115 return symbol_string(buf, end, ptr, spec, *fmt);
70116 case 'R':
70117@@ -1633,11 +1649,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70118 typeof(type) value; \
70119 if (sizeof(type) == 8) { \
70120 args = PTR_ALIGN(args, sizeof(u32)); \
70121- *(u32 *)&value = *(u32 *)args; \
70122- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
70123+ *(u32 *)&value = *(const u32 *)args; \
70124+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
70125 } else { \
70126 args = PTR_ALIGN(args, sizeof(type)); \
70127- value = *(typeof(type) *)args; \
70128+ value = *(const typeof(type) *)args; \
70129 } \
70130 args += sizeof(type); \
70131 value; \
70132@@ -1700,7 +1716,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70133 case FORMAT_TYPE_STR: {
70134 const char *str_arg = args;
70135 args += strlen(str_arg) + 1;
70136- str = string(str, end, (char *)str_arg, spec);
70137+ str = string(str, end, str_arg, spec);
70138 break;
70139 }
70140
70141diff --git a/localversion-grsec b/localversion-grsec
70142new file mode 100644
70143index 0000000..7cd6065
70144--- /dev/null
70145+++ b/localversion-grsec
70146@@ -0,0 +1 @@
70147+-grsec
70148diff --git a/mm/Kconfig b/mm/Kconfig
70149index e338407..49b5b7a 100644
70150--- a/mm/Kconfig
70151+++ b/mm/Kconfig
70152@@ -247,10 +247,10 @@ config KSM
70153 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
70154
70155 config DEFAULT_MMAP_MIN_ADDR
70156- int "Low address space to protect from user allocation"
70157+ int "Low address space to protect from user allocation"
70158 depends on MMU
70159- default 4096
70160- help
70161+ default 65536
70162+ help
70163 This is the portion of low virtual memory which should be protected
70164 from userspace allocation. Keeping a user from writing to low pages
70165 can help reduce the impact of kernel NULL pointer bugs.
70166diff --git a/mm/filemap.c b/mm/filemap.c
70167index b662757..3081ddd 100644
70168--- a/mm/filemap.c
70169+++ b/mm/filemap.c
70170@@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
70171 struct address_space *mapping = file->f_mapping;
70172
70173 if (!mapping->a_ops->readpage)
70174- return -ENOEXEC;
70175+ return -ENODEV;
70176 file_accessed(file);
70177 vma->vm_ops = &generic_file_vm_ops;
70178 vma->vm_flags |= VM_CAN_NONLINEAR;
70179@@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
70180 *pos = i_size_read(inode);
70181
70182 if (limit != RLIM_INFINITY) {
70183+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
70184 if (*pos >= limit) {
70185 send_sig(SIGXFSZ, current, 0);
70186 return -EFBIG;
70187diff --git a/mm/fremap.c b/mm/fremap.c
70188index 9ed4fd4..c42648d 100644
70189--- a/mm/fremap.c
70190+++ b/mm/fremap.c
70191@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
70192 retry:
70193 vma = find_vma(mm, start);
70194
70195+#ifdef CONFIG_PAX_SEGMEXEC
70196+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
70197+ goto out;
70198+#endif
70199+
70200 /*
70201 * Make sure the vma is shared, that it supports prefaulting,
70202 * and that the remapped range is valid and fully within
70203diff --git a/mm/highmem.c b/mm/highmem.c
70204index 57d82c6..e9e0552 100644
70205--- a/mm/highmem.c
70206+++ b/mm/highmem.c
70207@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
70208 * So no dangers, even with speculative execution.
70209 */
70210 page = pte_page(pkmap_page_table[i]);
70211+ pax_open_kernel();
70212 pte_clear(&init_mm, (unsigned long)page_address(page),
70213 &pkmap_page_table[i]);
70214-
70215+ pax_close_kernel();
70216 set_page_address(page, NULL);
70217 need_flush = 1;
70218 }
70219@@ -186,9 +187,11 @@ start:
70220 }
70221 }
70222 vaddr = PKMAP_ADDR(last_pkmap_nr);
70223+
70224+ pax_open_kernel();
70225 set_pte_at(&init_mm, vaddr,
70226 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
70227-
70228+ pax_close_kernel();
70229 pkmap_count[last_pkmap_nr] = 1;
70230 set_page_address(page, (void *)vaddr);
70231
70232diff --git a/mm/huge_memory.c b/mm/huge_memory.c
70233index 8f7fc39..69bf1e9 100644
70234--- a/mm/huge_memory.c
70235+++ b/mm/huge_memory.c
70236@@ -733,7 +733,7 @@ out:
70237 * run pte_offset_map on the pmd, if an huge pmd could
70238 * materialize from under us from a different thread.
70239 */
70240- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
70241+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70242 return VM_FAULT_OOM;
70243 /* if an huge pmd materialized from under us just retry later */
70244 if (unlikely(pmd_trans_huge(*pmd)))
70245diff --git a/mm/hugetlb.c b/mm/hugetlb.c
70246index a876871..132cde0 100644
70247--- a/mm/hugetlb.c
70248+++ b/mm/hugetlb.c
70249@@ -2346,6 +2346,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
70250 return 1;
70251 }
70252
70253+#ifdef CONFIG_PAX_SEGMEXEC
70254+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
70255+{
70256+ struct mm_struct *mm = vma->vm_mm;
70257+ struct vm_area_struct *vma_m;
70258+ unsigned long address_m;
70259+ pte_t *ptep_m;
70260+
70261+ vma_m = pax_find_mirror_vma(vma);
70262+ if (!vma_m)
70263+ return;
70264+
70265+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70266+ address_m = address + SEGMEXEC_TASK_SIZE;
70267+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
70268+ get_page(page_m);
70269+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
70270+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
70271+}
70272+#endif
70273+
70274 /*
70275 * Hugetlb_cow() should be called with page lock of the original hugepage held.
70276 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
70277@@ -2459,6 +2480,11 @@ retry_avoidcopy:
70278 make_huge_pte(vma, new_page, 1));
70279 page_remove_rmap(old_page);
70280 hugepage_add_new_anon_rmap(new_page, vma, address);
70281+
70282+#ifdef CONFIG_PAX_SEGMEXEC
70283+ pax_mirror_huge_pte(vma, address, new_page);
70284+#endif
70285+
70286 /* Make the old page be freed below */
70287 new_page = old_page;
70288 mmu_notifier_invalidate_range_end(mm,
70289@@ -2613,6 +2639,10 @@ retry:
70290 && (vma->vm_flags & VM_SHARED)));
70291 set_huge_pte_at(mm, address, ptep, new_pte);
70292
70293+#ifdef CONFIG_PAX_SEGMEXEC
70294+ pax_mirror_huge_pte(vma, address, page);
70295+#endif
70296+
70297 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
70298 /* Optimization, do the COW without a second fault */
70299 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
70300@@ -2642,6 +2672,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70301 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
70302 struct hstate *h = hstate_vma(vma);
70303
70304+#ifdef CONFIG_PAX_SEGMEXEC
70305+ struct vm_area_struct *vma_m;
70306+#endif
70307+
70308 address &= huge_page_mask(h);
70309
70310 ptep = huge_pte_offset(mm, address);
70311@@ -2655,6 +2689,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70312 VM_FAULT_SET_HINDEX(h - hstates);
70313 }
70314
70315+#ifdef CONFIG_PAX_SEGMEXEC
70316+ vma_m = pax_find_mirror_vma(vma);
70317+ if (vma_m) {
70318+ unsigned long address_m;
70319+
70320+ if (vma->vm_start > vma_m->vm_start) {
70321+ address_m = address;
70322+ address -= SEGMEXEC_TASK_SIZE;
70323+ vma = vma_m;
70324+ h = hstate_vma(vma);
70325+ } else
70326+ address_m = address + SEGMEXEC_TASK_SIZE;
70327+
70328+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
70329+ return VM_FAULT_OOM;
70330+ address_m &= HPAGE_MASK;
70331+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
70332+ }
70333+#endif
70334+
70335 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
70336 if (!ptep)
70337 return VM_FAULT_OOM;
70338diff --git a/mm/internal.h b/mm/internal.h
70339index 2189af4..f2ca332 100644
70340--- a/mm/internal.h
70341+++ b/mm/internal.h
70342@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
70343 * in mm/page_alloc.c
70344 */
70345 extern void __free_pages_bootmem(struct page *page, unsigned int order);
70346+extern void free_compound_page(struct page *page);
70347 extern void prep_compound_page(struct page *page, unsigned long order);
70348 #ifdef CONFIG_MEMORY_FAILURE
70349 extern bool is_free_buddy_page(struct page *page);
70350diff --git a/mm/kmemleak.c b/mm/kmemleak.c
70351index 45eb621..6ccd8ea 100644
70352--- a/mm/kmemleak.c
70353+++ b/mm/kmemleak.c
70354@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
70355
70356 for (i = 0; i < object->trace_len; i++) {
70357 void *ptr = (void *)object->trace[i];
70358- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
70359+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
70360 }
70361 }
70362
70363diff --git a/mm/maccess.c b/mm/maccess.c
70364index d53adf9..03a24bf 100644
70365--- a/mm/maccess.c
70366+++ b/mm/maccess.c
70367@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
70368 set_fs(KERNEL_DS);
70369 pagefault_disable();
70370 ret = __copy_from_user_inatomic(dst,
70371- (__force const void __user *)src, size);
70372+ (const void __force_user *)src, size);
70373 pagefault_enable();
70374 set_fs(old_fs);
70375
70376@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
70377
70378 set_fs(KERNEL_DS);
70379 pagefault_disable();
70380- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
70381+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
70382 pagefault_enable();
70383 set_fs(old_fs);
70384
70385diff --git a/mm/madvise.c b/mm/madvise.c
70386index 74bf193..feb6fd3 100644
70387--- a/mm/madvise.c
70388+++ b/mm/madvise.c
70389@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
70390 pgoff_t pgoff;
70391 unsigned long new_flags = vma->vm_flags;
70392
70393+#ifdef CONFIG_PAX_SEGMEXEC
70394+ struct vm_area_struct *vma_m;
70395+#endif
70396+
70397 switch (behavior) {
70398 case MADV_NORMAL:
70399 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
70400@@ -110,6 +114,13 @@ success:
70401 /*
70402 * vm_flags is protected by the mmap_sem held in write mode.
70403 */
70404+
70405+#ifdef CONFIG_PAX_SEGMEXEC
70406+ vma_m = pax_find_mirror_vma(vma);
70407+ if (vma_m)
70408+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
70409+#endif
70410+
70411 vma->vm_flags = new_flags;
70412
70413 out:
70414@@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70415 struct vm_area_struct ** prev,
70416 unsigned long start, unsigned long end)
70417 {
70418+
70419+#ifdef CONFIG_PAX_SEGMEXEC
70420+ struct vm_area_struct *vma_m;
70421+#endif
70422+
70423 *prev = vma;
70424 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
70425 return -EINVAL;
70426@@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70427 zap_page_range(vma, start, end - start, &details);
70428 } else
70429 zap_page_range(vma, start, end - start, NULL);
70430+
70431+#ifdef CONFIG_PAX_SEGMEXEC
70432+ vma_m = pax_find_mirror_vma(vma);
70433+ if (vma_m) {
70434+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
70435+ struct zap_details details = {
70436+ .nonlinear_vma = vma_m,
70437+ .last_index = ULONG_MAX,
70438+ };
70439+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
70440+ } else
70441+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
70442+ }
70443+#endif
70444+
70445 return 0;
70446 }
70447
70448@@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
70449 if (end < start)
70450 goto out;
70451
70452+#ifdef CONFIG_PAX_SEGMEXEC
70453+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70454+ if (end > SEGMEXEC_TASK_SIZE)
70455+ goto out;
70456+ } else
70457+#endif
70458+
70459+ if (end > TASK_SIZE)
70460+ goto out;
70461+
70462 error = 0;
70463 if (end == start)
70464 goto out;
70465diff --git a/mm/memory-failure.c b/mm/memory-failure.c
70466index 56080ea..115071e 100644
70467--- a/mm/memory-failure.c
70468+++ b/mm/memory-failure.c
70469@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
70470
70471 int sysctl_memory_failure_recovery __read_mostly = 1;
70472
70473-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70474+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70475
70476 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
70477
70478@@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
70479 si.si_signo = SIGBUS;
70480 si.si_errno = 0;
70481 si.si_code = BUS_MCEERR_AO;
70482- si.si_addr = (void *)addr;
70483+ si.si_addr = (void __user *)addr;
70484 #ifdef __ARCH_SI_TRAPNO
70485 si.si_trapno = trapno;
70486 #endif
70487@@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70488 }
70489
70490 nr_pages = 1 << compound_trans_order(hpage);
70491- atomic_long_add(nr_pages, &mce_bad_pages);
70492+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
70493
70494 /*
70495 * We need/can do nothing about count=0 pages.
70496@@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70497 if (!PageHWPoison(hpage)
70498 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
70499 || (p != hpage && TestSetPageHWPoison(hpage))) {
70500- atomic_long_sub(nr_pages, &mce_bad_pages);
70501+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70502 return 0;
70503 }
70504 set_page_hwpoison_huge_page(hpage);
70505@@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
70506 }
70507 if (hwpoison_filter(p)) {
70508 if (TestClearPageHWPoison(p))
70509- atomic_long_sub(nr_pages, &mce_bad_pages);
70510+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70511 unlock_page(hpage);
70512 put_page(hpage);
70513 return 0;
70514@@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
70515 return 0;
70516 }
70517 if (TestClearPageHWPoison(p))
70518- atomic_long_sub(nr_pages, &mce_bad_pages);
70519+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70520 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
70521 return 0;
70522 }
70523@@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
70524 */
70525 if (TestClearPageHWPoison(page)) {
70526 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
70527- atomic_long_sub(nr_pages, &mce_bad_pages);
70528+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70529 freeit = 1;
70530 if (PageHuge(page))
70531 clear_page_hwpoison_huge_page(page);
70532@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
70533 }
70534 done:
70535 if (!PageHWPoison(hpage))
70536- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
70537+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
70538 set_page_hwpoison_huge_page(hpage);
70539 dequeue_hwpoisoned_huge_page(hpage);
70540 /* keep elevated page count for bad page */
70541@@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
70542 return ret;
70543
70544 done:
70545- atomic_long_add(1, &mce_bad_pages);
70546+ atomic_long_add_unchecked(1, &mce_bad_pages);
70547 SetPageHWPoison(page);
70548 /* keep elevated page count for bad page */
70549 return ret;
70550diff --git a/mm/memory.c b/mm/memory.c
70551index 10b4dda..764ee07 100644
70552--- a/mm/memory.c
70553+++ b/mm/memory.c
70554@@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
70555 return;
70556
70557 pmd = pmd_offset(pud, start);
70558+
70559+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
70560 pud_clear(pud);
70561 pmd_free_tlb(tlb, pmd, start);
70562+#endif
70563+
70564 }
70565
70566 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70567@@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70568 if (end - 1 > ceiling - 1)
70569 return;
70570
70571+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
70572 pud = pud_offset(pgd, start);
70573 pgd_clear(pgd);
70574 pud_free_tlb(tlb, pud, start);
70575+#endif
70576+
70577 }
70578
70579 /*
70580@@ -1593,12 +1600,6 @@ no_page_table:
70581 return page;
70582 }
70583
70584-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
70585-{
70586- return stack_guard_page_start(vma, addr) ||
70587- stack_guard_page_end(vma, addr+PAGE_SIZE);
70588-}
70589-
70590 /**
70591 * __get_user_pages() - pin user pages in memory
70592 * @tsk: task_struct of target task
70593@@ -1671,10 +1672,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70594 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
70595 i = 0;
70596
70597- do {
70598+ while (nr_pages) {
70599 struct vm_area_struct *vma;
70600
70601- vma = find_extend_vma(mm, start);
70602+ vma = find_vma(mm, start);
70603 if (!vma && in_gate_area(mm, start)) {
70604 unsigned long pg = start & PAGE_MASK;
70605 pgd_t *pgd;
70606@@ -1722,7 +1723,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70607 goto next_page;
70608 }
70609
70610- if (!vma ||
70611+ if (!vma || start < vma->vm_start ||
70612 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
70613 !(vm_flags & vma->vm_flags))
70614 return i ? : -EFAULT;
70615@@ -1749,11 +1750,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70616 int ret;
70617 unsigned int fault_flags = 0;
70618
70619- /* For mlock, just skip the stack guard page. */
70620- if (foll_flags & FOLL_MLOCK) {
70621- if (stack_guard_page(vma, start))
70622- goto next_page;
70623- }
70624 if (foll_flags & FOLL_WRITE)
70625 fault_flags |= FAULT_FLAG_WRITE;
70626 if (nonblocking)
70627@@ -1827,7 +1823,7 @@ next_page:
70628 start += PAGE_SIZE;
70629 nr_pages--;
70630 } while (nr_pages && start < vma->vm_end);
70631- } while (nr_pages);
70632+ }
70633 return i;
70634 }
70635 EXPORT_SYMBOL(__get_user_pages);
70636@@ -2034,6 +2030,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
70637 page_add_file_rmap(page);
70638 set_pte_at(mm, addr, pte, mk_pte(page, prot));
70639
70640+#ifdef CONFIG_PAX_SEGMEXEC
70641+ pax_mirror_file_pte(vma, addr, page, ptl);
70642+#endif
70643+
70644 retval = 0;
70645 pte_unmap_unlock(pte, ptl);
70646 return retval;
70647@@ -2068,10 +2068,22 @@ out:
70648 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
70649 struct page *page)
70650 {
70651+
70652+#ifdef CONFIG_PAX_SEGMEXEC
70653+ struct vm_area_struct *vma_m;
70654+#endif
70655+
70656 if (addr < vma->vm_start || addr >= vma->vm_end)
70657 return -EFAULT;
70658 if (!page_count(page))
70659 return -EINVAL;
70660+
70661+#ifdef CONFIG_PAX_SEGMEXEC
70662+ vma_m = pax_find_mirror_vma(vma);
70663+ if (vma_m)
70664+ vma_m->vm_flags |= VM_INSERTPAGE;
70665+#endif
70666+
70667 vma->vm_flags |= VM_INSERTPAGE;
70668 return insert_page(vma, addr, page, vma->vm_page_prot);
70669 }
70670@@ -2157,6 +2169,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
70671 unsigned long pfn)
70672 {
70673 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
70674+ BUG_ON(vma->vm_mirror);
70675
70676 if (addr < vma->vm_start || addr >= vma->vm_end)
70677 return -EFAULT;
70678@@ -2472,6 +2485,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
70679 copy_user_highpage(dst, src, va, vma);
70680 }
70681
70682+#ifdef CONFIG_PAX_SEGMEXEC
70683+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
70684+{
70685+ struct mm_struct *mm = vma->vm_mm;
70686+ spinlock_t *ptl;
70687+ pte_t *pte, entry;
70688+
70689+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
70690+ entry = *pte;
70691+ if (!pte_present(entry)) {
70692+ if (!pte_none(entry)) {
70693+ BUG_ON(pte_file(entry));
70694+ free_swap_and_cache(pte_to_swp_entry(entry));
70695+ pte_clear_not_present_full(mm, address, pte, 0);
70696+ }
70697+ } else {
70698+ struct page *page;
70699+
70700+ flush_cache_page(vma, address, pte_pfn(entry));
70701+ entry = ptep_clear_flush(vma, address, pte);
70702+ BUG_ON(pte_dirty(entry));
70703+ page = vm_normal_page(vma, address, entry);
70704+ if (page) {
70705+ update_hiwater_rss(mm);
70706+ if (PageAnon(page))
70707+ dec_mm_counter_fast(mm, MM_ANONPAGES);
70708+ else
70709+ dec_mm_counter_fast(mm, MM_FILEPAGES);
70710+ page_remove_rmap(page);
70711+ page_cache_release(page);
70712+ }
70713+ }
70714+ pte_unmap_unlock(pte, ptl);
70715+}
70716+
70717+/* PaX: if vma is mirrored, synchronize the mirror's PTE
70718+ *
70719+ * the ptl of the lower mapped page is held on entry and is not released on exit
70720+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
70721+ */
70722+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70723+{
70724+ struct mm_struct *mm = vma->vm_mm;
70725+ unsigned long address_m;
70726+ spinlock_t *ptl_m;
70727+ struct vm_area_struct *vma_m;
70728+ pmd_t *pmd_m;
70729+ pte_t *pte_m, entry_m;
70730+
70731+ BUG_ON(!page_m || !PageAnon(page_m));
70732+
70733+ vma_m = pax_find_mirror_vma(vma);
70734+ if (!vma_m)
70735+ return;
70736+
70737+ BUG_ON(!PageLocked(page_m));
70738+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70739+ address_m = address + SEGMEXEC_TASK_SIZE;
70740+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70741+ pte_m = pte_offset_map(pmd_m, address_m);
70742+ ptl_m = pte_lockptr(mm, pmd_m);
70743+ if (ptl != ptl_m) {
70744+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70745+ if (!pte_none(*pte_m))
70746+ goto out;
70747+ }
70748+
70749+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70750+ page_cache_get(page_m);
70751+ page_add_anon_rmap(page_m, vma_m, address_m);
70752+ inc_mm_counter_fast(mm, MM_ANONPAGES);
70753+ set_pte_at(mm, address_m, pte_m, entry_m);
70754+ update_mmu_cache(vma_m, address_m, entry_m);
70755+out:
70756+ if (ptl != ptl_m)
70757+ spin_unlock(ptl_m);
70758+ pte_unmap(pte_m);
70759+ unlock_page(page_m);
70760+}
70761+
70762+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70763+{
70764+ struct mm_struct *mm = vma->vm_mm;
70765+ unsigned long address_m;
70766+ spinlock_t *ptl_m;
70767+ struct vm_area_struct *vma_m;
70768+ pmd_t *pmd_m;
70769+ pte_t *pte_m, entry_m;
70770+
70771+ BUG_ON(!page_m || PageAnon(page_m));
70772+
70773+ vma_m = pax_find_mirror_vma(vma);
70774+ if (!vma_m)
70775+ return;
70776+
70777+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70778+ address_m = address + SEGMEXEC_TASK_SIZE;
70779+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70780+ pte_m = pte_offset_map(pmd_m, address_m);
70781+ ptl_m = pte_lockptr(mm, pmd_m);
70782+ if (ptl != ptl_m) {
70783+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70784+ if (!pte_none(*pte_m))
70785+ goto out;
70786+ }
70787+
70788+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70789+ page_cache_get(page_m);
70790+ page_add_file_rmap(page_m);
70791+ inc_mm_counter_fast(mm, MM_FILEPAGES);
70792+ set_pte_at(mm, address_m, pte_m, entry_m);
70793+ update_mmu_cache(vma_m, address_m, entry_m);
70794+out:
70795+ if (ptl != ptl_m)
70796+ spin_unlock(ptl_m);
70797+ pte_unmap(pte_m);
70798+}
70799+
70800+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70801+{
70802+ struct mm_struct *mm = vma->vm_mm;
70803+ unsigned long address_m;
70804+ spinlock_t *ptl_m;
70805+ struct vm_area_struct *vma_m;
70806+ pmd_t *pmd_m;
70807+ pte_t *pte_m, entry_m;
70808+
70809+ vma_m = pax_find_mirror_vma(vma);
70810+ if (!vma_m)
70811+ return;
70812+
70813+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70814+ address_m = address + SEGMEXEC_TASK_SIZE;
70815+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70816+ pte_m = pte_offset_map(pmd_m, address_m);
70817+ ptl_m = pte_lockptr(mm, pmd_m);
70818+ if (ptl != ptl_m) {
70819+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70820+ if (!pte_none(*pte_m))
70821+ goto out;
70822+ }
70823+
70824+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70825+ set_pte_at(mm, address_m, pte_m, entry_m);
70826+out:
70827+ if (ptl != ptl_m)
70828+ spin_unlock(ptl_m);
70829+ pte_unmap(pte_m);
70830+}
70831+
70832+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70833+{
70834+ struct page *page_m;
70835+ pte_t entry;
70836+
70837+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70838+ goto out;
70839+
70840+ entry = *pte;
70841+ page_m = vm_normal_page(vma, address, entry);
70842+ if (!page_m)
70843+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70844+ else if (PageAnon(page_m)) {
70845+ if (pax_find_mirror_vma(vma)) {
70846+ pte_unmap_unlock(pte, ptl);
70847+ lock_page(page_m);
70848+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70849+ if (pte_same(entry, *pte))
70850+ pax_mirror_anon_pte(vma, address, page_m, ptl);
70851+ else
70852+ unlock_page(page_m);
70853+ }
70854+ } else
70855+ pax_mirror_file_pte(vma, address, page_m, ptl);
70856+
70857+out:
70858+ pte_unmap_unlock(pte, ptl);
70859+}
70860+#endif
70861+
70862 /*
70863 * This routine handles present pages, when users try to write
70864 * to a shared page. It is done by copying the page to a new address
70865@@ -2683,6 +2876,12 @@ gotten:
70866 */
70867 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70868 if (likely(pte_same(*page_table, orig_pte))) {
70869+
70870+#ifdef CONFIG_PAX_SEGMEXEC
70871+ if (pax_find_mirror_vma(vma))
70872+ BUG_ON(!trylock_page(new_page));
70873+#endif
70874+
70875 if (old_page) {
70876 if (!PageAnon(old_page)) {
70877 dec_mm_counter_fast(mm, MM_FILEPAGES);
70878@@ -2734,6 +2933,10 @@ gotten:
70879 page_remove_rmap(old_page);
70880 }
70881
70882+#ifdef CONFIG_PAX_SEGMEXEC
70883+ pax_mirror_anon_pte(vma, address, new_page, ptl);
70884+#endif
70885+
70886 /* Free the old page.. */
70887 new_page = old_page;
70888 ret |= VM_FAULT_WRITE;
70889@@ -3013,6 +3216,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70890 swap_free(entry);
70891 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70892 try_to_free_swap(page);
70893+
70894+#ifdef CONFIG_PAX_SEGMEXEC
70895+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70896+#endif
70897+
70898 unlock_page(page);
70899 if (swapcache) {
70900 /*
70901@@ -3036,6 +3244,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70902
70903 /* No need to invalidate - it was non-present before */
70904 update_mmu_cache(vma, address, page_table);
70905+
70906+#ifdef CONFIG_PAX_SEGMEXEC
70907+ pax_mirror_anon_pte(vma, address, page, ptl);
70908+#endif
70909+
70910 unlock:
70911 pte_unmap_unlock(page_table, ptl);
70912 out:
70913@@ -3055,40 +3268,6 @@ out_release:
70914 }
70915
70916 /*
70917- * This is like a special single-page "expand_{down|up}wards()",
70918- * except we must first make sure that 'address{-|+}PAGE_SIZE'
70919- * doesn't hit another vma.
70920- */
70921-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70922-{
70923- address &= PAGE_MASK;
70924- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70925- struct vm_area_struct *prev = vma->vm_prev;
70926-
70927- /*
70928- * Is there a mapping abutting this one below?
70929- *
70930- * That's only ok if it's the same stack mapping
70931- * that has gotten split..
70932- */
70933- if (prev && prev->vm_end == address)
70934- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70935-
70936- expand_downwards(vma, address - PAGE_SIZE);
70937- }
70938- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70939- struct vm_area_struct *next = vma->vm_next;
70940-
70941- /* As VM_GROWSDOWN but s/below/above/ */
70942- if (next && next->vm_start == address + PAGE_SIZE)
70943- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70944-
70945- expand_upwards(vma, address + PAGE_SIZE);
70946- }
70947- return 0;
70948-}
70949-
70950-/*
70951 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70952 * but allow concurrent faults), and pte mapped but not yet locked.
70953 * We return with mmap_sem still held, but pte unmapped and unlocked.
70954@@ -3097,27 +3276,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70955 unsigned long address, pte_t *page_table, pmd_t *pmd,
70956 unsigned int flags)
70957 {
70958- struct page *page;
70959+ struct page *page = NULL;
70960 spinlock_t *ptl;
70961 pte_t entry;
70962
70963- pte_unmap(page_table);
70964-
70965- /* Check if we need to add a guard page to the stack */
70966- if (check_stack_guard_page(vma, address) < 0)
70967- return VM_FAULT_SIGBUS;
70968-
70969- /* Use the zero-page for reads */
70970 if (!(flags & FAULT_FLAG_WRITE)) {
70971 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
70972 vma->vm_page_prot));
70973- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70974+ ptl = pte_lockptr(mm, pmd);
70975+ spin_lock(ptl);
70976 if (!pte_none(*page_table))
70977 goto unlock;
70978 goto setpte;
70979 }
70980
70981 /* Allocate our own private page. */
70982+ pte_unmap(page_table);
70983+
70984 if (unlikely(anon_vma_prepare(vma)))
70985 goto oom;
70986 page = alloc_zeroed_user_highpage_movable(vma, address);
70987@@ -3136,6 +3311,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70988 if (!pte_none(*page_table))
70989 goto release;
70990
70991+#ifdef CONFIG_PAX_SEGMEXEC
70992+ if (pax_find_mirror_vma(vma))
70993+ BUG_ON(!trylock_page(page));
70994+#endif
70995+
70996 inc_mm_counter_fast(mm, MM_ANONPAGES);
70997 page_add_new_anon_rmap(page, vma, address);
70998 setpte:
70999@@ -3143,6 +3323,12 @@ setpte:
71000
71001 /* No need to invalidate - it was non-present before */
71002 update_mmu_cache(vma, address, page_table);
71003+
71004+#ifdef CONFIG_PAX_SEGMEXEC
71005+ if (page)
71006+ pax_mirror_anon_pte(vma, address, page, ptl);
71007+#endif
71008+
71009 unlock:
71010 pte_unmap_unlock(page_table, ptl);
71011 return 0;
71012@@ -3286,6 +3472,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71013 */
71014 /* Only go through if we didn't race with anybody else... */
71015 if (likely(pte_same(*page_table, orig_pte))) {
71016+
71017+#ifdef CONFIG_PAX_SEGMEXEC
71018+ if (anon && pax_find_mirror_vma(vma))
71019+ BUG_ON(!trylock_page(page));
71020+#endif
71021+
71022 flush_icache_page(vma, page);
71023 entry = mk_pte(page, vma->vm_page_prot);
71024 if (flags & FAULT_FLAG_WRITE)
71025@@ -3305,6 +3497,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71026
71027 /* no need to invalidate: a not-present page won't be cached */
71028 update_mmu_cache(vma, address, page_table);
71029+
71030+#ifdef CONFIG_PAX_SEGMEXEC
71031+ if (anon)
71032+ pax_mirror_anon_pte(vma, address, page, ptl);
71033+ else
71034+ pax_mirror_file_pte(vma, address, page, ptl);
71035+#endif
71036+
71037 } else {
71038 if (cow_page)
71039 mem_cgroup_uncharge_page(cow_page);
71040@@ -3458,6 +3658,12 @@ int handle_pte_fault(struct mm_struct *mm,
71041 if (flags & FAULT_FLAG_WRITE)
71042 flush_tlb_fix_spurious_fault(vma, address);
71043 }
71044+
71045+#ifdef CONFIG_PAX_SEGMEXEC
71046+ pax_mirror_pte(vma, address, pte, pmd, ptl);
71047+ return 0;
71048+#endif
71049+
71050 unlock:
71051 pte_unmap_unlock(pte, ptl);
71052 return 0;
71053@@ -3474,6 +3680,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71054 pmd_t *pmd;
71055 pte_t *pte;
71056
71057+#ifdef CONFIG_PAX_SEGMEXEC
71058+ struct vm_area_struct *vma_m;
71059+#endif
71060+
71061 __set_current_state(TASK_RUNNING);
71062
71063 count_vm_event(PGFAULT);
71064@@ -3485,6 +3695,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71065 if (unlikely(is_vm_hugetlb_page(vma)))
71066 return hugetlb_fault(mm, vma, address, flags);
71067
71068+#ifdef CONFIG_PAX_SEGMEXEC
71069+ vma_m = pax_find_mirror_vma(vma);
71070+ if (vma_m) {
71071+ unsigned long address_m;
71072+ pgd_t *pgd_m;
71073+ pud_t *pud_m;
71074+ pmd_t *pmd_m;
71075+
71076+ if (vma->vm_start > vma_m->vm_start) {
71077+ address_m = address;
71078+ address -= SEGMEXEC_TASK_SIZE;
71079+ vma = vma_m;
71080+ } else
71081+ address_m = address + SEGMEXEC_TASK_SIZE;
71082+
71083+ pgd_m = pgd_offset(mm, address_m);
71084+ pud_m = pud_alloc(mm, pgd_m, address_m);
71085+ if (!pud_m)
71086+ return VM_FAULT_OOM;
71087+ pmd_m = pmd_alloc(mm, pud_m, address_m);
71088+ if (!pmd_m)
71089+ return VM_FAULT_OOM;
71090+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
71091+ return VM_FAULT_OOM;
71092+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
71093+ }
71094+#endif
71095+
71096 pgd = pgd_offset(mm, address);
71097 pud = pud_alloc(mm, pgd, address);
71098 if (!pud)
71099@@ -3514,7 +3752,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71100 * run pte_offset_map on the pmd, if an huge pmd could
71101 * materialize from under us from a different thread.
71102 */
71103- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
71104+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
71105 return VM_FAULT_OOM;
71106 /* if an huge pmd materialized from under us just retry later */
71107 if (unlikely(pmd_trans_huge(*pmd)))
71108@@ -3618,7 +3856,7 @@ static int __init gate_vma_init(void)
71109 gate_vma.vm_start = FIXADDR_USER_START;
71110 gate_vma.vm_end = FIXADDR_USER_END;
71111 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
71112- gate_vma.vm_page_prot = __P101;
71113+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
71114 /*
71115 * Make sure the vDSO gets into every core dump.
71116 * Dumping its contents makes post-mortem fully interpretable later
71117diff --git a/mm/mempolicy.c b/mm/mempolicy.c
71118index 0a37570..2048346 100644
71119--- a/mm/mempolicy.c
71120+++ b/mm/mempolicy.c
71121@@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71122 unsigned long vmstart;
71123 unsigned long vmend;
71124
71125+#ifdef CONFIG_PAX_SEGMEXEC
71126+ struct vm_area_struct *vma_m;
71127+#endif
71128+
71129 vma = find_vma(mm, start);
71130 if (!vma || vma->vm_start > start)
71131 return -EFAULT;
71132@@ -679,6 +683,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71133 err = policy_vma(vma, new_pol);
71134 if (err)
71135 goto out;
71136+
71137+#ifdef CONFIG_PAX_SEGMEXEC
71138+ vma_m = pax_find_mirror_vma(vma);
71139+ if (vma_m) {
71140+ err = policy_vma(vma_m, new_pol);
71141+ if (err)
71142+ goto out;
71143+ }
71144+#endif
71145+
71146 }
71147
71148 out:
71149@@ -1112,6 +1126,17 @@ static long do_mbind(unsigned long start, unsigned long len,
71150
71151 if (end < start)
71152 return -EINVAL;
71153+
71154+#ifdef CONFIG_PAX_SEGMEXEC
71155+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71156+ if (end > SEGMEXEC_TASK_SIZE)
71157+ return -EINVAL;
71158+ } else
71159+#endif
71160+
71161+ if (end > TASK_SIZE)
71162+ return -EINVAL;
71163+
71164 if (end == start)
71165 return 0;
71166
71167@@ -1330,6 +1355,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71168 if (!mm)
71169 goto out;
71170
71171+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71172+ if (mm != current->mm &&
71173+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71174+ err = -EPERM;
71175+ goto out;
71176+ }
71177+#endif
71178+
71179 /*
71180 * Check if this process has the right to modify the specified
71181 * process. The right exists if the process has administrative
71182@@ -1339,8 +1372,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71183 rcu_read_lock();
71184 tcred = __task_cred(task);
71185 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71186- cred->uid != tcred->suid && cred->uid != tcred->uid &&
71187- !capable(CAP_SYS_NICE)) {
71188+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71189 rcu_read_unlock();
71190 err = -EPERM;
71191 goto out;
71192diff --git a/mm/migrate.c b/mm/migrate.c
71193index 1503b6b..156c672 100644
71194--- a/mm/migrate.c
71195+++ b/mm/migrate.c
71196@@ -1370,6 +1370,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
71197 if (!mm)
71198 return -EINVAL;
71199
71200+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71201+ if (mm != current->mm &&
71202+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71203+ err = -EPERM;
71204+ goto out;
71205+ }
71206+#endif
71207+
71208 /*
71209 * Check if this process has the right to modify the specified
71210 * process. The right exists if the process has administrative
71211@@ -1379,8 +1387,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
71212 rcu_read_lock();
71213 tcred = __task_cred(task);
71214 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71215- cred->uid != tcred->suid && cred->uid != tcred->uid &&
71216- !capable(CAP_SYS_NICE)) {
71217+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71218 rcu_read_unlock();
71219 err = -EPERM;
71220 goto out;
71221diff --git a/mm/mlock.c b/mm/mlock.c
71222index ef726e8..13e0901 100644
71223--- a/mm/mlock.c
71224+++ b/mm/mlock.c
71225@@ -13,6 +13,7 @@
71226 #include <linux/pagemap.h>
71227 #include <linux/mempolicy.h>
71228 #include <linux/syscalls.h>
71229+#include <linux/security.h>
71230 #include <linux/sched.h>
71231 #include <linux/export.h>
71232 #include <linux/rmap.h>
71233@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
71234 return -EINVAL;
71235 if (end == start)
71236 return 0;
71237+ if (end > TASK_SIZE)
71238+ return -EINVAL;
71239+
71240 vma = find_vma(current->mm, start);
71241 if (!vma || vma->vm_start > start)
71242 return -ENOMEM;
71243@@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
71244 for (nstart = start ; ; ) {
71245 vm_flags_t newflags;
71246
71247+#ifdef CONFIG_PAX_SEGMEXEC
71248+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71249+ break;
71250+#endif
71251+
71252 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
71253
71254 newflags = vma->vm_flags | VM_LOCKED;
71255@@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
71256 lock_limit >>= PAGE_SHIFT;
71257
71258 /* check against resource limits */
71259+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
71260 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
71261 error = do_mlock(start, len, 1);
71262 up_write(&current->mm->mmap_sem);
71263@@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
71264 static int do_mlockall(int flags)
71265 {
71266 struct vm_area_struct * vma, * prev = NULL;
71267- unsigned int def_flags = 0;
71268
71269 if (flags & MCL_FUTURE)
71270- def_flags = VM_LOCKED;
71271- current->mm->def_flags = def_flags;
71272+ current->mm->def_flags |= VM_LOCKED;
71273+ else
71274+ current->mm->def_flags &= ~VM_LOCKED;
71275 if (flags == MCL_FUTURE)
71276 goto out;
71277
71278 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
71279 vm_flags_t newflags;
71280
71281+#ifdef CONFIG_PAX_SEGMEXEC
71282+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71283+ break;
71284+#endif
71285+
71286+ BUG_ON(vma->vm_end > TASK_SIZE);
71287 newflags = vma->vm_flags | VM_LOCKED;
71288 if (!(flags & MCL_CURRENT))
71289 newflags &= ~VM_LOCKED;
71290@@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
71291 lock_limit >>= PAGE_SHIFT;
71292
71293 ret = -ENOMEM;
71294+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
71295 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
71296 capable(CAP_IPC_LOCK))
71297 ret = do_mlockall(flags);
71298diff --git a/mm/mmap.c b/mm/mmap.c
71299index da15a79..314aef3 100644
71300--- a/mm/mmap.c
71301+++ b/mm/mmap.c
71302@@ -46,6 +46,16 @@
71303 #define arch_rebalance_pgtables(addr, len) (addr)
71304 #endif
71305
71306+static inline void verify_mm_writelocked(struct mm_struct *mm)
71307+{
71308+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
71309+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71310+ up_read(&mm->mmap_sem);
71311+ BUG();
71312+ }
71313+#endif
71314+}
71315+
71316 static void unmap_region(struct mm_struct *mm,
71317 struct vm_area_struct *vma, struct vm_area_struct *prev,
71318 unsigned long start, unsigned long end);
71319@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
71320 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
71321 *
71322 */
71323-pgprot_t protection_map[16] = {
71324+pgprot_t protection_map[16] __read_only = {
71325 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
71326 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
71327 };
71328
71329-pgprot_t vm_get_page_prot(unsigned long vm_flags)
71330+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
71331 {
71332- return __pgprot(pgprot_val(protection_map[vm_flags &
71333+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
71334 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
71335 pgprot_val(arch_vm_get_page_prot(vm_flags)));
71336+
71337+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71338+ if (!(__supported_pte_mask & _PAGE_NX) &&
71339+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
71340+ (vm_flags & (VM_READ | VM_WRITE)))
71341+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
71342+#endif
71343+
71344+ return prot;
71345 }
71346 EXPORT_SYMBOL(vm_get_page_prot);
71347
71348 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
71349 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
71350 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
71351+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
71352 /*
71353 * Make sure vm_committed_as in one cacheline and not cacheline shared with
71354 * other variables. It can be updated by several CPUs frequently.
71355@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
71356 struct vm_area_struct *next = vma->vm_next;
71357
71358 might_sleep();
71359+ BUG_ON(vma->vm_mirror);
71360 if (vma->vm_ops && vma->vm_ops->close)
71361 vma->vm_ops->close(vma);
71362 if (vma->vm_file) {
71363@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
71364 * not page aligned -Ram Gupta
71365 */
71366 rlim = rlimit(RLIMIT_DATA);
71367+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
71368 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
71369 (mm->end_data - mm->start_data) > rlim)
71370 goto out;
71371@@ -689,6 +711,12 @@ static int
71372 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
71373 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71374 {
71375+
71376+#ifdef CONFIG_PAX_SEGMEXEC
71377+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
71378+ return 0;
71379+#endif
71380+
71381 if (is_mergeable_vma(vma, file, vm_flags) &&
71382 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71383 if (vma->vm_pgoff == vm_pgoff)
71384@@ -708,6 +736,12 @@ static int
71385 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71386 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71387 {
71388+
71389+#ifdef CONFIG_PAX_SEGMEXEC
71390+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
71391+ return 0;
71392+#endif
71393+
71394 if (is_mergeable_vma(vma, file, vm_flags) &&
71395 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71396 pgoff_t vm_pglen;
71397@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71398 struct vm_area_struct *vma_merge(struct mm_struct *mm,
71399 struct vm_area_struct *prev, unsigned long addr,
71400 unsigned long end, unsigned long vm_flags,
71401- struct anon_vma *anon_vma, struct file *file,
71402+ struct anon_vma *anon_vma, struct file *file,
71403 pgoff_t pgoff, struct mempolicy *policy)
71404 {
71405 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
71406 struct vm_area_struct *area, *next;
71407 int err;
71408
71409+#ifdef CONFIG_PAX_SEGMEXEC
71410+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
71411+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
71412+
71413+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
71414+#endif
71415+
71416 /*
71417 * We later require that vma->vm_flags == vm_flags,
71418 * so this tests vma->vm_flags & VM_SPECIAL, too.
71419@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71420 if (next && next->vm_end == end) /* cases 6, 7, 8 */
71421 next = next->vm_next;
71422
71423+#ifdef CONFIG_PAX_SEGMEXEC
71424+ if (prev)
71425+ prev_m = pax_find_mirror_vma(prev);
71426+ if (area)
71427+ area_m = pax_find_mirror_vma(area);
71428+ if (next)
71429+ next_m = pax_find_mirror_vma(next);
71430+#endif
71431+
71432 /*
71433 * Can it merge with the predecessor?
71434 */
71435@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71436 /* cases 1, 6 */
71437 err = vma_adjust(prev, prev->vm_start,
71438 next->vm_end, prev->vm_pgoff, NULL);
71439- } else /* cases 2, 5, 7 */
71440+
71441+#ifdef CONFIG_PAX_SEGMEXEC
71442+ if (!err && prev_m)
71443+ err = vma_adjust(prev_m, prev_m->vm_start,
71444+ next_m->vm_end, prev_m->vm_pgoff, NULL);
71445+#endif
71446+
71447+ } else { /* cases 2, 5, 7 */
71448 err = vma_adjust(prev, prev->vm_start,
71449 end, prev->vm_pgoff, NULL);
71450+
71451+#ifdef CONFIG_PAX_SEGMEXEC
71452+ if (!err && prev_m)
71453+ err = vma_adjust(prev_m, prev_m->vm_start,
71454+ end_m, prev_m->vm_pgoff, NULL);
71455+#endif
71456+
71457+ }
71458 if (err)
71459 return NULL;
71460 khugepaged_enter_vma_merge(prev);
71461@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71462 mpol_equal(policy, vma_policy(next)) &&
71463 can_vma_merge_before(next, vm_flags,
71464 anon_vma, file, pgoff+pglen)) {
71465- if (prev && addr < prev->vm_end) /* case 4 */
71466+ if (prev && addr < prev->vm_end) { /* case 4 */
71467 err = vma_adjust(prev, prev->vm_start,
71468 addr, prev->vm_pgoff, NULL);
71469- else /* cases 3, 8 */
71470+
71471+#ifdef CONFIG_PAX_SEGMEXEC
71472+ if (!err && prev_m)
71473+ err = vma_adjust(prev_m, prev_m->vm_start,
71474+ addr_m, prev_m->vm_pgoff, NULL);
71475+#endif
71476+
71477+ } else { /* cases 3, 8 */
71478 err = vma_adjust(area, addr, next->vm_end,
71479 next->vm_pgoff - pglen, NULL);
71480+
71481+#ifdef CONFIG_PAX_SEGMEXEC
71482+ if (!err && area_m)
71483+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
71484+ next_m->vm_pgoff - pglen, NULL);
71485+#endif
71486+
71487+ }
71488 if (err)
71489 return NULL;
71490 khugepaged_enter_vma_merge(area);
71491@@ -921,14 +1001,11 @@ none:
71492 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
71493 struct file *file, long pages)
71494 {
71495- const unsigned long stack_flags
71496- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
71497-
71498 if (file) {
71499 mm->shared_vm += pages;
71500 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
71501 mm->exec_vm += pages;
71502- } else if (flags & stack_flags)
71503+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
71504 mm->stack_vm += pages;
71505 if (flags & (VM_RESERVED|VM_IO))
71506 mm->reserved_vm += pages;
71507@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71508 * (the exception is when the underlying filesystem is noexec
71509 * mounted, in which case we dont add PROT_EXEC.)
71510 */
71511- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71512+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71513 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
71514 prot |= PROT_EXEC;
71515
71516@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71517 /* Obtain the address to map to. we verify (or select) it and ensure
71518 * that it represents a valid section of the address space.
71519 */
71520- addr = get_unmapped_area(file, addr, len, pgoff, flags);
71521+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
71522 if (addr & ~PAGE_MASK)
71523 return addr;
71524
71525@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71526 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
71527 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
71528
71529+#ifdef CONFIG_PAX_MPROTECT
71530+ if (mm->pax_flags & MF_PAX_MPROTECT) {
71531+#ifndef CONFIG_PAX_MPROTECT_COMPAT
71532+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
71533+ gr_log_rwxmmap(file);
71534+
71535+#ifdef CONFIG_PAX_EMUPLT
71536+ vm_flags &= ~VM_EXEC;
71537+#else
71538+ return -EPERM;
71539+#endif
71540+
71541+ }
71542+
71543+ if (!(vm_flags & VM_EXEC))
71544+ vm_flags &= ~VM_MAYEXEC;
71545+#else
71546+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71547+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71548+#endif
71549+ else
71550+ vm_flags &= ~VM_MAYWRITE;
71551+ }
71552+#endif
71553+
71554+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71555+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
71556+ vm_flags &= ~VM_PAGEEXEC;
71557+#endif
71558+
71559 if (flags & MAP_LOCKED)
71560 if (!can_do_mlock())
71561 return -EPERM;
71562@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71563 locked += mm->locked_vm;
71564 lock_limit = rlimit(RLIMIT_MEMLOCK);
71565 lock_limit >>= PAGE_SHIFT;
71566+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71567 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
71568 return -EAGAIN;
71569 }
71570@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71571 if (error)
71572 return error;
71573
71574+ if (!gr_acl_handle_mmap(file, prot))
71575+ return -EACCES;
71576+
71577 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
71578 }
71579 EXPORT_SYMBOL(do_mmap_pgoff);
71580@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
71581 vm_flags_t vm_flags = vma->vm_flags;
71582
71583 /* If it was private or non-writable, the write bit is already clear */
71584- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
71585+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
71586 return 0;
71587
71588 /* The backer wishes to know when pages are first written to? */
71589@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
71590 unsigned long charged = 0;
71591 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
71592
71593+#ifdef CONFIG_PAX_SEGMEXEC
71594+ struct vm_area_struct *vma_m = NULL;
71595+#endif
71596+
71597+ /*
71598+ * mm->mmap_sem is required to protect against another thread
71599+ * changing the mappings in case we sleep.
71600+ */
71601+ verify_mm_writelocked(mm);
71602+
71603 /* Clear old maps */
71604 error = -ENOMEM;
71605-munmap_back:
71606 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71607 if (vma && vma->vm_start < addr + len) {
71608 if (do_munmap(mm, addr, len))
71609 return -ENOMEM;
71610- goto munmap_back;
71611+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71612+ BUG_ON(vma && vma->vm_start < addr + len);
71613 }
71614
71615 /* Check against address space limit. */
71616@@ -1258,6 +1379,16 @@ munmap_back:
71617 goto unacct_error;
71618 }
71619
71620+#ifdef CONFIG_PAX_SEGMEXEC
71621+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
71622+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71623+ if (!vma_m) {
71624+ error = -ENOMEM;
71625+ goto free_vma;
71626+ }
71627+ }
71628+#endif
71629+
71630 vma->vm_mm = mm;
71631 vma->vm_start = addr;
71632 vma->vm_end = addr + len;
71633@@ -1282,6 +1413,19 @@ munmap_back:
71634 error = file->f_op->mmap(file, vma);
71635 if (error)
71636 goto unmap_and_free_vma;
71637+
71638+#ifdef CONFIG_PAX_SEGMEXEC
71639+ if (vma_m && (vm_flags & VM_EXECUTABLE))
71640+ added_exe_file_vma(mm);
71641+#endif
71642+
71643+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71644+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
71645+ vma->vm_flags |= VM_PAGEEXEC;
71646+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71647+ }
71648+#endif
71649+
71650 if (vm_flags & VM_EXECUTABLE)
71651 added_exe_file_vma(mm);
71652
71653@@ -1319,6 +1463,11 @@ munmap_back:
71654 vma_link(mm, vma, prev, rb_link, rb_parent);
71655 file = vma->vm_file;
71656
71657+#ifdef CONFIG_PAX_SEGMEXEC
71658+ if (vma_m)
71659+ BUG_ON(pax_mirror_vma(vma_m, vma));
71660+#endif
71661+
71662 /* Once vma denies write, undo our temporary denial count */
71663 if (correct_wcount)
71664 atomic_inc(&inode->i_writecount);
71665@@ -1327,6 +1476,7 @@ out:
71666
71667 mm->total_vm += len >> PAGE_SHIFT;
71668 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
71669+ track_exec_limit(mm, addr, addr + len, vm_flags);
71670 if (vm_flags & VM_LOCKED) {
71671 if (!mlock_vma_pages_range(vma, addr, addr + len))
71672 mm->locked_vm += (len >> PAGE_SHIFT);
71673@@ -1344,6 +1494,12 @@ unmap_and_free_vma:
71674 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
71675 charged = 0;
71676 free_vma:
71677+
71678+#ifdef CONFIG_PAX_SEGMEXEC
71679+ if (vma_m)
71680+ kmem_cache_free(vm_area_cachep, vma_m);
71681+#endif
71682+
71683 kmem_cache_free(vm_area_cachep, vma);
71684 unacct_error:
71685 if (charged)
71686@@ -1351,6 +1507,44 @@ unacct_error:
71687 return error;
71688 }
71689
71690+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
71691+{
71692+ if (!vma) {
71693+#ifdef CONFIG_STACK_GROWSUP
71694+ if (addr > sysctl_heap_stack_gap)
71695+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
71696+ else
71697+ vma = find_vma(current->mm, 0);
71698+ if (vma && (vma->vm_flags & VM_GROWSUP))
71699+ return false;
71700+#endif
71701+ return true;
71702+ }
71703+
71704+ if (addr + len > vma->vm_start)
71705+ return false;
71706+
71707+ if (vma->vm_flags & VM_GROWSDOWN)
71708+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
71709+#ifdef CONFIG_STACK_GROWSUP
71710+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
71711+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
71712+#endif
71713+
71714+ return true;
71715+}
71716+
71717+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
71718+{
71719+ if (vma->vm_start < len)
71720+ return -ENOMEM;
71721+ if (!(vma->vm_flags & VM_GROWSDOWN))
71722+ return vma->vm_start - len;
71723+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
71724+ return vma->vm_start - len - sysctl_heap_stack_gap;
71725+ return -ENOMEM;
71726+}
71727+
71728 /* Get an address range which is currently unmapped.
71729 * For shmat() with addr=0.
71730 *
71731@@ -1377,18 +1571,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
71732 if (flags & MAP_FIXED)
71733 return addr;
71734
71735+#ifdef CONFIG_PAX_RANDMMAP
71736+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71737+#endif
71738+
71739 if (addr) {
71740 addr = PAGE_ALIGN(addr);
71741- vma = find_vma(mm, addr);
71742- if (TASK_SIZE - len >= addr &&
71743- (!vma || addr + len <= vma->vm_start))
71744- return addr;
71745+ if (TASK_SIZE - len >= addr) {
71746+ vma = find_vma(mm, addr);
71747+ if (check_heap_stack_gap(vma, addr, len))
71748+ return addr;
71749+ }
71750 }
71751 if (len > mm->cached_hole_size) {
71752- start_addr = addr = mm->free_area_cache;
71753+ start_addr = addr = mm->free_area_cache;
71754 } else {
71755- start_addr = addr = TASK_UNMAPPED_BASE;
71756- mm->cached_hole_size = 0;
71757+ start_addr = addr = mm->mmap_base;
71758+ mm->cached_hole_size = 0;
71759 }
71760
71761 full_search:
71762@@ -1399,34 +1598,40 @@ full_search:
71763 * Start a new search - just in case we missed
71764 * some holes.
71765 */
71766- if (start_addr != TASK_UNMAPPED_BASE) {
71767- addr = TASK_UNMAPPED_BASE;
71768- start_addr = addr;
71769+ if (start_addr != mm->mmap_base) {
71770+ start_addr = addr = mm->mmap_base;
71771 mm->cached_hole_size = 0;
71772 goto full_search;
71773 }
71774 return -ENOMEM;
71775 }
71776- if (!vma || addr + len <= vma->vm_start) {
71777- /*
71778- * Remember the place where we stopped the search:
71779- */
71780- mm->free_area_cache = addr + len;
71781- return addr;
71782- }
71783+ if (check_heap_stack_gap(vma, addr, len))
71784+ break;
71785 if (addr + mm->cached_hole_size < vma->vm_start)
71786 mm->cached_hole_size = vma->vm_start - addr;
71787 addr = vma->vm_end;
71788 }
71789+
71790+ /*
71791+ * Remember the place where we stopped the search:
71792+ */
71793+ mm->free_area_cache = addr + len;
71794+ return addr;
71795 }
71796 #endif
71797
71798 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71799 {
71800+
71801+#ifdef CONFIG_PAX_SEGMEXEC
71802+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71803+ return;
71804+#endif
71805+
71806 /*
71807 * Is this a new hole at the lowest possible address?
71808 */
71809- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
71810+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
71811 mm->free_area_cache = addr;
71812 mm->cached_hole_size = ~0UL;
71813 }
71814@@ -1444,7 +1649,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71815 {
71816 struct vm_area_struct *vma;
71817 struct mm_struct *mm = current->mm;
71818- unsigned long addr = addr0;
71819+ unsigned long base = mm->mmap_base, addr = addr0;
71820
71821 /* requested length too big for entire address space */
71822 if (len > TASK_SIZE)
71823@@ -1453,13 +1658,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71824 if (flags & MAP_FIXED)
71825 return addr;
71826
71827+#ifdef CONFIG_PAX_RANDMMAP
71828+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71829+#endif
71830+
71831 /* requesting a specific address */
71832 if (addr) {
71833 addr = PAGE_ALIGN(addr);
71834- vma = find_vma(mm, addr);
71835- if (TASK_SIZE - len >= addr &&
71836- (!vma || addr + len <= vma->vm_start))
71837- return addr;
71838+ if (TASK_SIZE - len >= addr) {
71839+ vma = find_vma(mm, addr);
71840+ if (check_heap_stack_gap(vma, addr, len))
71841+ return addr;
71842+ }
71843 }
71844
71845 /* check if free_area_cache is useful for us */
71846@@ -1474,7 +1684,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71847 /* make sure it can fit in the remaining address space */
71848 if (addr > len) {
71849 vma = find_vma(mm, addr-len);
71850- if (!vma || addr <= vma->vm_start)
71851+ if (check_heap_stack_gap(vma, addr - len, len))
71852 /* remember the address as a hint for next time */
71853 return (mm->free_area_cache = addr-len);
71854 }
71855@@ -1491,7 +1701,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71856 * return with success:
71857 */
71858 vma = find_vma(mm, addr);
71859- if (!vma || addr+len <= vma->vm_start)
71860+ if (check_heap_stack_gap(vma, addr, len))
71861 /* remember the address as a hint for next time */
71862 return (mm->free_area_cache = addr);
71863
71864@@ -1500,8 +1710,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71865 mm->cached_hole_size = vma->vm_start - addr;
71866
71867 /* try just below the current vma->vm_start */
71868- addr = vma->vm_start-len;
71869- } while (len < vma->vm_start);
71870+ addr = skip_heap_stack_gap(vma, len);
71871+ } while (!IS_ERR_VALUE(addr));
71872
71873 bottomup:
71874 /*
71875@@ -1510,13 +1720,21 @@ bottomup:
71876 * can happen with large stack limits and large mmap()
71877 * allocations.
71878 */
71879+ mm->mmap_base = TASK_UNMAPPED_BASE;
71880+
71881+#ifdef CONFIG_PAX_RANDMMAP
71882+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71883+ mm->mmap_base += mm->delta_mmap;
71884+#endif
71885+
71886+ mm->free_area_cache = mm->mmap_base;
71887 mm->cached_hole_size = ~0UL;
71888- mm->free_area_cache = TASK_UNMAPPED_BASE;
71889 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71890 /*
71891 * Restore the topdown base:
71892 */
71893- mm->free_area_cache = mm->mmap_base;
71894+ mm->mmap_base = base;
71895+ mm->free_area_cache = base;
71896 mm->cached_hole_size = ~0UL;
71897
71898 return addr;
71899@@ -1525,6 +1743,12 @@ bottomup:
71900
71901 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71902 {
71903+
71904+#ifdef CONFIG_PAX_SEGMEXEC
71905+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71906+ return;
71907+#endif
71908+
71909 /*
71910 * Is this a new hole at the highest possible address?
71911 */
71912@@ -1532,8 +1756,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71913 mm->free_area_cache = addr;
71914
71915 /* dont allow allocations above current base */
71916- if (mm->free_area_cache > mm->mmap_base)
71917+ if (mm->free_area_cache > mm->mmap_base) {
71918 mm->free_area_cache = mm->mmap_base;
71919+ mm->cached_hole_size = ~0UL;
71920+ }
71921 }
71922
71923 unsigned long
71924@@ -1629,6 +1855,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
71925 return vma;
71926 }
71927
71928+#ifdef CONFIG_PAX_SEGMEXEC
71929+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71930+{
71931+ struct vm_area_struct *vma_m;
71932+
71933+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71934+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71935+ BUG_ON(vma->vm_mirror);
71936+ return NULL;
71937+ }
71938+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71939+ vma_m = vma->vm_mirror;
71940+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71941+ BUG_ON(vma->vm_file != vma_m->vm_file);
71942+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71943+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71944+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71945+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
71946+ return vma_m;
71947+}
71948+#endif
71949+
71950 /*
71951 * Verify that the stack growth is acceptable and
71952 * update accounting. This is shared with both the
71953@@ -1645,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71954 return -ENOMEM;
71955
71956 /* Stack limit test */
71957+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
71958 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
71959 return -ENOMEM;
71960
71961@@ -1655,6 +1904,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71962 locked = mm->locked_vm + grow;
71963 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
71964 limit >>= PAGE_SHIFT;
71965+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71966 if (locked > limit && !capable(CAP_IPC_LOCK))
71967 return -ENOMEM;
71968 }
71969@@ -1685,37 +1935,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71970 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
71971 * vma is the last one with address > vma->vm_end. Have to extend vma.
71972 */
71973+#ifndef CONFIG_IA64
71974+static
71975+#endif
71976 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71977 {
71978 int error;
71979+ bool locknext;
71980
71981 if (!(vma->vm_flags & VM_GROWSUP))
71982 return -EFAULT;
71983
71984+ /* Also guard against wrapping around to address 0. */
71985+ if (address < PAGE_ALIGN(address+1))
71986+ address = PAGE_ALIGN(address+1);
71987+ else
71988+ return -ENOMEM;
71989+
71990 /*
71991 * We must make sure the anon_vma is allocated
71992 * so that the anon_vma locking is not a noop.
71993 */
71994 if (unlikely(anon_vma_prepare(vma)))
71995 return -ENOMEM;
71996+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
71997+ if (locknext && anon_vma_prepare(vma->vm_next))
71998+ return -ENOMEM;
71999 vma_lock_anon_vma(vma);
72000+ if (locknext)
72001+ vma_lock_anon_vma(vma->vm_next);
72002
72003 /*
72004 * vma->vm_start/vm_end cannot change under us because the caller
72005 * is required to hold the mmap_sem in read mode. We need the
72006- * anon_vma lock to serialize against concurrent expand_stacks.
72007- * Also guard against wrapping around to address 0.
72008+ * anon_vma locks to serialize against concurrent expand_stacks
72009+ * and expand_upwards.
72010 */
72011- if (address < PAGE_ALIGN(address+4))
72012- address = PAGE_ALIGN(address+4);
72013- else {
72014- vma_unlock_anon_vma(vma);
72015- return -ENOMEM;
72016- }
72017 error = 0;
72018
72019 /* Somebody else might have raced and expanded it already */
72020- if (address > vma->vm_end) {
72021+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
72022+ error = -ENOMEM;
72023+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
72024 unsigned long size, grow;
72025
72026 size = address - vma->vm_start;
72027@@ -1730,6 +1991,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72028 }
72029 }
72030 }
72031+ if (locknext)
72032+ vma_unlock_anon_vma(vma->vm_next);
72033 vma_unlock_anon_vma(vma);
72034 khugepaged_enter_vma_merge(vma);
72035 return error;
72036@@ -1743,6 +2006,8 @@ int expand_downwards(struct vm_area_struct *vma,
72037 unsigned long address)
72038 {
72039 int error;
72040+ bool lockprev = false;
72041+ struct vm_area_struct *prev;
72042
72043 /*
72044 * We must make sure the anon_vma is allocated
72045@@ -1756,6 +2021,15 @@ int expand_downwards(struct vm_area_struct *vma,
72046 if (error)
72047 return error;
72048
72049+ prev = vma->vm_prev;
72050+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
72051+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
72052+#endif
72053+ if (lockprev && anon_vma_prepare(prev))
72054+ return -ENOMEM;
72055+ if (lockprev)
72056+ vma_lock_anon_vma(prev);
72057+
72058 vma_lock_anon_vma(vma);
72059
72060 /*
72061@@ -1765,9 +2039,17 @@ int expand_downwards(struct vm_area_struct *vma,
72062 */
72063
72064 /* Somebody else might have raced and expanded it already */
72065- if (address < vma->vm_start) {
72066+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
72067+ error = -ENOMEM;
72068+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
72069 unsigned long size, grow;
72070
72071+#ifdef CONFIG_PAX_SEGMEXEC
72072+ struct vm_area_struct *vma_m;
72073+
72074+ vma_m = pax_find_mirror_vma(vma);
72075+#endif
72076+
72077 size = vma->vm_end - address;
72078 grow = (vma->vm_start - address) >> PAGE_SHIFT;
72079
72080@@ -1777,11 +2059,22 @@ int expand_downwards(struct vm_area_struct *vma,
72081 if (!error) {
72082 vma->vm_start = address;
72083 vma->vm_pgoff -= grow;
72084+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
72085+
72086+#ifdef CONFIG_PAX_SEGMEXEC
72087+ if (vma_m) {
72088+ vma_m->vm_start -= grow << PAGE_SHIFT;
72089+ vma_m->vm_pgoff -= grow;
72090+ }
72091+#endif
72092+
72093 perf_event_mmap(vma);
72094 }
72095 }
72096 }
72097 vma_unlock_anon_vma(vma);
72098+ if (lockprev)
72099+ vma_unlock_anon_vma(prev);
72100 khugepaged_enter_vma_merge(vma);
72101 return error;
72102 }
72103@@ -1851,6 +2144,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
72104 do {
72105 long nrpages = vma_pages(vma);
72106
72107+#ifdef CONFIG_PAX_SEGMEXEC
72108+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
72109+ vma = remove_vma(vma);
72110+ continue;
72111+ }
72112+#endif
72113+
72114 mm->total_vm -= nrpages;
72115 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
72116 vma = remove_vma(vma);
72117@@ -1896,6 +2196,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
72118 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
72119 vma->vm_prev = NULL;
72120 do {
72121+
72122+#ifdef CONFIG_PAX_SEGMEXEC
72123+ if (vma->vm_mirror) {
72124+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
72125+ vma->vm_mirror->vm_mirror = NULL;
72126+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
72127+ vma->vm_mirror = NULL;
72128+ }
72129+#endif
72130+
72131 rb_erase(&vma->vm_rb, &mm->mm_rb);
72132 mm->map_count--;
72133 tail_vma = vma;
72134@@ -1924,14 +2234,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72135 struct vm_area_struct *new;
72136 int err = -ENOMEM;
72137
72138+#ifdef CONFIG_PAX_SEGMEXEC
72139+ struct vm_area_struct *vma_m, *new_m = NULL;
72140+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
72141+#endif
72142+
72143 if (is_vm_hugetlb_page(vma) && (addr &
72144 ~(huge_page_mask(hstate_vma(vma)))))
72145 return -EINVAL;
72146
72147+#ifdef CONFIG_PAX_SEGMEXEC
72148+ vma_m = pax_find_mirror_vma(vma);
72149+#endif
72150+
72151 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72152 if (!new)
72153 goto out_err;
72154
72155+#ifdef CONFIG_PAX_SEGMEXEC
72156+ if (vma_m) {
72157+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72158+ if (!new_m) {
72159+ kmem_cache_free(vm_area_cachep, new);
72160+ goto out_err;
72161+ }
72162+ }
72163+#endif
72164+
72165 /* most fields are the same, copy all, and then fixup */
72166 *new = *vma;
72167
72168@@ -1944,6 +2273,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72169 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
72170 }
72171
72172+#ifdef CONFIG_PAX_SEGMEXEC
72173+ if (vma_m) {
72174+ *new_m = *vma_m;
72175+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
72176+ new_m->vm_mirror = new;
72177+ new->vm_mirror = new_m;
72178+
72179+ if (new_below)
72180+ new_m->vm_end = addr_m;
72181+ else {
72182+ new_m->vm_start = addr_m;
72183+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
72184+ }
72185+ }
72186+#endif
72187+
72188 pol = mpol_dup(vma_policy(vma));
72189 if (IS_ERR(pol)) {
72190 err = PTR_ERR(pol);
72191@@ -1969,6 +2314,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72192 else
72193 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
72194
72195+#ifdef CONFIG_PAX_SEGMEXEC
72196+ if (!err && vma_m) {
72197+ if (anon_vma_clone(new_m, vma_m))
72198+ goto out_free_mpol;
72199+
72200+ mpol_get(pol);
72201+ vma_set_policy(new_m, pol);
72202+
72203+ if (new_m->vm_file) {
72204+ get_file(new_m->vm_file);
72205+ if (vma_m->vm_flags & VM_EXECUTABLE)
72206+ added_exe_file_vma(mm);
72207+ }
72208+
72209+ if (new_m->vm_ops && new_m->vm_ops->open)
72210+ new_m->vm_ops->open(new_m);
72211+
72212+ if (new_below)
72213+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
72214+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
72215+ else
72216+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
72217+
72218+ if (err) {
72219+ if (new_m->vm_ops && new_m->vm_ops->close)
72220+ new_m->vm_ops->close(new_m);
72221+ if (new_m->vm_file) {
72222+ if (vma_m->vm_flags & VM_EXECUTABLE)
72223+ removed_exe_file_vma(mm);
72224+ fput(new_m->vm_file);
72225+ }
72226+ mpol_put(pol);
72227+ }
72228+ }
72229+#endif
72230+
72231 /* Success. */
72232 if (!err)
72233 return 0;
72234@@ -1981,10 +2362,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72235 removed_exe_file_vma(mm);
72236 fput(new->vm_file);
72237 }
72238- unlink_anon_vmas(new);
72239 out_free_mpol:
72240 mpol_put(pol);
72241 out_free_vma:
72242+
72243+#ifdef CONFIG_PAX_SEGMEXEC
72244+ if (new_m) {
72245+ unlink_anon_vmas(new_m);
72246+ kmem_cache_free(vm_area_cachep, new_m);
72247+ }
72248+#endif
72249+
72250+ unlink_anon_vmas(new);
72251 kmem_cache_free(vm_area_cachep, new);
72252 out_err:
72253 return err;
72254@@ -1997,6 +2386,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72255 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72256 unsigned long addr, int new_below)
72257 {
72258+
72259+#ifdef CONFIG_PAX_SEGMEXEC
72260+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72261+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
72262+ if (mm->map_count >= sysctl_max_map_count-1)
72263+ return -ENOMEM;
72264+ } else
72265+#endif
72266+
72267 if (mm->map_count >= sysctl_max_map_count)
72268 return -ENOMEM;
72269
72270@@ -2008,11 +2406,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72271 * work. This now handles partial unmappings.
72272 * Jeremy Fitzhardinge <jeremy@goop.org>
72273 */
72274+#ifdef CONFIG_PAX_SEGMEXEC
72275 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72276 {
72277+ int ret = __do_munmap(mm, start, len);
72278+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
72279+ return ret;
72280+
72281+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
72282+}
72283+
72284+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72285+#else
72286+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72287+#endif
72288+{
72289 unsigned long end;
72290 struct vm_area_struct *vma, *prev, *last;
72291
72292+ /*
72293+ * mm->mmap_sem is required to protect against another thread
72294+ * changing the mappings in case we sleep.
72295+ */
72296+ verify_mm_writelocked(mm);
72297+
72298 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
72299 return -EINVAL;
72300
72301@@ -2087,6 +2504,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72302 /* Fix up all other VM information */
72303 remove_vma_list(mm, vma);
72304
72305+ track_exec_limit(mm, start, end, 0UL);
72306+
72307 return 0;
72308 }
72309
72310@@ -2099,22 +2518,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
72311
72312 profile_munmap(addr);
72313
72314+#ifdef CONFIG_PAX_SEGMEXEC
72315+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
72316+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
72317+ return -EINVAL;
72318+#endif
72319+
72320 down_write(&mm->mmap_sem);
72321 ret = do_munmap(mm, addr, len);
72322 up_write(&mm->mmap_sem);
72323 return ret;
72324 }
72325
72326-static inline void verify_mm_writelocked(struct mm_struct *mm)
72327-{
72328-#ifdef CONFIG_DEBUG_VM
72329- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72330- WARN_ON(1);
72331- up_read(&mm->mmap_sem);
72332- }
72333-#endif
72334-}
72335-
72336 /*
72337 * this is really a simplified "do_mmap". it only handles
72338 * anonymous maps. eventually we may be able to do some
72339@@ -2128,6 +2543,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72340 struct rb_node ** rb_link, * rb_parent;
72341 pgoff_t pgoff = addr >> PAGE_SHIFT;
72342 int error;
72343+ unsigned long charged;
72344
72345 len = PAGE_ALIGN(len);
72346 if (!len)
72347@@ -2139,16 +2555,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72348
72349 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
72350
72351+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
72352+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
72353+ flags &= ~VM_EXEC;
72354+
72355+#ifdef CONFIG_PAX_MPROTECT
72356+ if (mm->pax_flags & MF_PAX_MPROTECT)
72357+ flags &= ~VM_MAYEXEC;
72358+#endif
72359+
72360+ }
72361+#endif
72362+
72363 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
72364 if (error & ~PAGE_MASK)
72365 return error;
72366
72367+ charged = len >> PAGE_SHIFT;
72368+
72369 /*
72370 * mlock MCL_FUTURE?
72371 */
72372 if (mm->def_flags & VM_LOCKED) {
72373 unsigned long locked, lock_limit;
72374- locked = len >> PAGE_SHIFT;
72375+ locked = charged;
72376 locked += mm->locked_vm;
72377 lock_limit = rlimit(RLIMIT_MEMLOCK);
72378 lock_limit >>= PAGE_SHIFT;
72379@@ -2165,22 +2595,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72380 /*
72381 * Clear old maps. this also does some error checking for us
72382 */
72383- munmap_back:
72384 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72385 if (vma && vma->vm_start < addr + len) {
72386 if (do_munmap(mm, addr, len))
72387 return -ENOMEM;
72388- goto munmap_back;
72389+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72390+ BUG_ON(vma && vma->vm_start < addr + len);
72391 }
72392
72393 /* Check against address space limits *after* clearing old maps... */
72394- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
72395+ if (!may_expand_vm(mm, charged))
72396 return -ENOMEM;
72397
72398 if (mm->map_count > sysctl_max_map_count)
72399 return -ENOMEM;
72400
72401- if (security_vm_enough_memory(len >> PAGE_SHIFT))
72402+ if (security_vm_enough_memory(charged))
72403 return -ENOMEM;
72404
72405 /* Can we just expand an old private anonymous mapping? */
72406@@ -2194,7 +2624,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72407 */
72408 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72409 if (!vma) {
72410- vm_unacct_memory(len >> PAGE_SHIFT);
72411+ vm_unacct_memory(charged);
72412 return -ENOMEM;
72413 }
72414
72415@@ -2208,11 +2638,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72416 vma_link(mm, vma, prev, rb_link, rb_parent);
72417 out:
72418 perf_event_mmap(vma);
72419- mm->total_vm += len >> PAGE_SHIFT;
72420+ mm->total_vm += charged;
72421 if (flags & VM_LOCKED) {
72422 if (!mlock_vma_pages_range(vma, addr, addr + len))
72423- mm->locked_vm += (len >> PAGE_SHIFT);
72424+ mm->locked_vm += charged;
72425 }
72426+ track_exec_limit(mm, addr, addr + len, flags);
72427 return addr;
72428 }
72429
72430@@ -2259,8 +2690,10 @@ void exit_mmap(struct mm_struct *mm)
72431 * Walk the list again, actually closing and freeing it,
72432 * with preemption enabled, without holding any MM locks.
72433 */
72434- while (vma)
72435+ while (vma) {
72436+ vma->vm_mirror = NULL;
72437 vma = remove_vma(vma);
72438+ }
72439
72440 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
72441 }
72442@@ -2274,6 +2707,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72443 struct vm_area_struct * __vma, * prev;
72444 struct rb_node ** rb_link, * rb_parent;
72445
72446+#ifdef CONFIG_PAX_SEGMEXEC
72447+ struct vm_area_struct *vma_m = NULL;
72448+#endif
72449+
72450+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
72451+ return -EPERM;
72452+
72453 /*
72454 * The vm_pgoff of a purely anonymous vma should be irrelevant
72455 * until its first write fault, when page's anon_vma and index
72456@@ -2296,7 +2736,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72457 if ((vma->vm_flags & VM_ACCOUNT) &&
72458 security_vm_enough_memory_mm(mm, vma_pages(vma)))
72459 return -ENOMEM;
72460+
72461+#ifdef CONFIG_PAX_SEGMEXEC
72462+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
72463+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72464+ if (!vma_m)
72465+ return -ENOMEM;
72466+ }
72467+#endif
72468+
72469 vma_link(mm, vma, prev, rb_link, rb_parent);
72470+
72471+#ifdef CONFIG_PAX_SEGMEXEC
72472+ if (vma_m)
72473+ BUG_ON(pax_mirror_vma(vma_m, vma));
72474+#endif
72475+
72476 return 0;
72477 }
72478
72479@@ -2315,6 +2770,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72480 struct mempolicy *pol;
72481 bool faulted_in_anon_vma = true;
72482
72483+ BUG_ON(vma->vm_mirror);
72484+
72485 /*
72486 * If anonymous vma has not yet been faulted, update new pgoff
72487 * to match new location, to increase its chance of merging.
72488@@ -2382,6 +2839,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72489 return NULL;
72490 }
72491
72492+#ifdef CONFIG_PAX_SEGMEXEC
72493+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
72494+{
72495+ struct vm_area_struct *prev_m;
72496+ struct rb_node **rb_link_m, *rb_parent_m;
72497+ struct mempolicy *pol_m;
72498+
72499+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
72500+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
72501+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
72502+ *vma_m = *vma;
72503+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
72504+ if (anon_vma_clone(vma_m, vma))
72505+ return -ENOMEM;
72506+ pol_m = vma_policy(vma_m);
72507+ mpol_get(pol_m);
72508+ vma_set_policy(vma_m, pol_m);
72509+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
72510+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
72511+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
72512+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
72513+ if (vma_m->vm_file)
72514+ get_file(vma_m->vm_file);
72515+ if (vma_m->vm_ops && vma_m->vm_ops->open)
72516+ vma_m->vm_ops->open(vma_m);
72517+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
72518+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
72519+ vma_m->vm_mirror = vma;
72520+ vma->vm_mirror = vma_m;
72521+ return 0;
72522+}
72523+#endif
72524+
72525 /*
72526 * Return true if the calling process may expand its vm space by the passed
72527 * number of pages
72528@@ -2393,6 +2883,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
72529
72530 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
72531
72532+#ifdef CONFIG_PAX_RANDMMAP
72533+ if (mm->pax_flags & MF_PAX_RANDMMAP)
72534+ cur -= mm->brk_gap;
72535+#endif
72536+
72537+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
72538 if (cur + npages > lim)
72539 return 0;
72540 return 1;
72541@@ -2463,6 +2959,22 @@ int install_special_mapping(struct mm_struct *mm,
72542 vma->vm_start = addr;
72543 vma->vm_end = addr + len;
72544
72545+#ifdef CONFIG_PAX_MPROTECT
72546+ if (mm->pax_flags & MF_PAX_MPROTECT) {
72547+#ifndef CONFIG_PAX_MPROTECT_COMPAT
72548+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
72549+ return -EPERM;
72550+ if (!(vm_flags & VM_EXEC))
72551+ vm_flags &= ~VM_MAYEXEC;
72552+#else
72553+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72554+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72555+#endif
72556+ else
72557+ vm_flags &= ~VM_MAYWRITE;
72558+ }
72559+#endif
72560+
72561 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
72562 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72563
72564diff --git a/mm/mprotect.c b/mm/mprotect.c
72565index f437d05..e3763f6 100644
72566--- a/mm/mprotect.c
72567+++ b/mm/mprotect.c
72568@@ -23,10 +23,16 @@
72569 #include <linux/mmu_notifier.h>
72570 #include <linux/migrate.h>
72571 #include <linux/perf_event.h>
72572+
72573+#ifdef CONFIG_PAX_MPROTECT
72574+#include <linux/elf.h>
72575+#endif
72576+
72577 #include <asm/uaccess.h>
72578 #include <asm/pgtable.h>
72579 #include <asm/cacheflush.h>
72580 #include <asm/tlbflush.h>
72581+#include <asm/mmu_context.h>
72582
72583 #ifndef pgprot_modify
72584 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
72585@@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
72586 flush_tlb_range(vma, start, end);
72587 }
72588
72589+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72590+/* called while holding the mmap semaphor for writing except stack expansion */
72591+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
72592+{
72593+ unsigned long oldlimit, newlimit = 0UL;
72594+
72595+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
72596+ return;
72597+
72598+ spin_lock(&mm->page_table_lock);
72599+ oldlimit = mm->context.user_cs_limit;
72600+ if ((prot & VM_EXEC) && oldlimit < end)
72601+ /* USER_CS limit moved up */
72602+ newlimit = end;
72603+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
72604+ /* USER_CS limit moved down */
72605+ newlimit = start;
72606+
72607+ if (newlimit) {
72608+ mm->context.user_cs_limit = newlimit;
72609+
72610+#ifdef CONFIG_SMP
72611+ wmb();
72612+ cpus_clear(mm->context.cpu_user_cs_mask);
72613+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
72614+#endif
72615+
72616+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
72617+ }
72618+ spin_unlock(&mm->page_table_lock);
72619+ if (newlimit == end) {
72620+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
72621+
72622+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
72623+ if (is_vm_hugetlb_page(vma))
72624+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
72625+ else
72626+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
72627+ }
72628+}
72629+#endif
72630+
72631 int
72632 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72633 unsigned long start, unsigned long end, unsigned long newflags)
72634@@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72635 int error;
72636 int dirty_accountable = 0;
72637
72638+#ifdef CONFIG_PAX_SEGMEXEC
72639+ struct vm_area_struct *vma_m = NULL;
72640+ unsigned long start_m, end_m;
72641+
72642+ start_m = start + SEGMEXEC_TASK_SIZE;
72643+ end_m = end + SEGMEXEC_TASK_SIZE;
72644+#endif
72645+
72646 if (newflags == oldflags) {
72647 *pprev = vma;
72648 return 0;
72649 }
72650
72651+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
72652+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
72653+
72654+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
72655+ return -ENOMEM;
72656+
72657+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
72658+ return -ENOMEM;
72659+ }
72660+
72661 /*
72662 * If we make a private mapping writable we increase our commit;
72663 * but (without finer accounting) cannot reduce our commit if we
72664@@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72665 }
72666 }
72667
72668+#ifdef CONFIG_PAX_SEGMEXEC
72669+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
72670+ if (start != vma->vm_start) {
72671+ error = split_vma(mm, vma, start, 1);
72672+ if (error)
72673+ goto fail;
72674+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
72675+ *pprev = (*pprev)->vm_next;
72676+ }
72677+
72678+ if (end != vma->vm_end) {
72679+ error = split_vma(mm, vma, end, 0);
72680+ if (error)
72681+ goto fail;
72682+ }
72683+
72684+ if (pax_find_mirror_vma(vma)) {
72685+ error = __do_munmap(mm, start_m, end_m - start_m);
72686+ if (error)
72687+ goto fail;
72688+ } else {
72689+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72690+ if (!vma_m) {
72691+ error = -ENOMEM;
72692+ goto fail;
72693+ }
72694+ vma->vm_flags = newflags;
72695+ error = pax_mirror_vma(vma_m, vma);
72696+ if (error) {
72697+ vma->vm_flags = oldflags;
72698+ goto fail;
72699+ }
72700+ }
72701+ }
72702+#endif
72703+
72704 /*
72705 * First try to merge with previous and/or next vma.
72706 */
72707@@ -204,9 +306,21 @@ success:
72708 * vm_flags and vm_page_prot are protected by the mmap_sem
72709 * held in write mode.
72710 */
72711+
72712+#ifdef CONFIG_PAX_SEGMEXEC
72713+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
72714+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
72715+#endif
72716+
72717 vma->vm_flags = newflags;
72718+
72719+#ifdef CONFIG_PAX_MPROTECT
72720+ if (mm->binfmt && mm->binfmt->handle_mprotect)
72721+ mm->binfmt->handle_mprotect(vma, newflags);
72722+#endif
72723+
72724 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
72725- vm_get_page_prot(newflags));
72726+ vm_get_page_prot(vma->vm_flags));
72727
72728 if (vma_wants_writenotify(vma)) {
72729 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
72730@@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72731 end = start + len;
72732 if (end <= start)
72733 return -ENOMEM;
72734+
72735+#ifdef CONFIG_PAX_SEGMEXEC
72736+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
72737+ if (end > SEGMEXEC_TASK_SIZE)
72738+ return -EINVAL;
72739+ } else
72740+#endif
72741+
72742+ if (end > TASK_SIZE)
72743+ return -EINVAL;
72744+
72745 if (!arch_validate_prot(prot))
72746 return -EINVAL;
72747
72748@@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72749 /*
72750 * Does the application expect PROT_READ to imply PROT_EXEC:
72751 */
72752- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72753+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72754 prot |= PROT_EXEC;
72755
72756 vm_flags = calc_vm_prot_bits(prot);
72757@@ -288,6 +413,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72758 if (start > vma->vm_start)
72759 prev = vma;
72760
72761+#ifdef CONFIG_PAX_MPROTECT
72762+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72763+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
72764+#endif
72765+
72766 for (nstart = start ; ; ) {
72767 unsigned long newflags;
72768
72769@@ -297,6 +427,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72770
72771 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72772 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72773+ if (prot & (PROT_WRITE | PROT_EXEC))
72774+ gr_log_rwxmprotect(vma->vm_file);
72775+
72776+ error = -EACCES;
72777+ goto out;
72778+ }
72779+
72780+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72781 error = -EACCES;
72782 goto out;
72783 }
72784@@ -311,6 +449,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72785 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72786 if (error)
72787 goto out;
72788+
72789+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
72790+
72791 nstart = tmp;
72792
72793 if (nstart < prev->vm_end)
72794diff --git a/mm/mremap.c b/mm/mremap.c
72795index 87bb839..c3bfadb 100644
72796--- a/mm/mremap.c
72797+++ b/mm/mremap.c
72798@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72799 continue;
72800 pte = ptep_get_and_clear(mm, old_addr, old_pte);
72801 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72802+
72803+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72804+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72805+ pte = pte_exprotect(pte);
72806+#endif
72807+
72808 set_pte_at(mm, new_addr, new_pte, pte);
72809 }
72810
72811@@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72812 if (is_vm_hugetlb_page(vma))
72813 goto Einval;
72814
72815+#ifdef CONFIG_PAX_SEGMEXEC
72816+ if (pax_find_mirror_vma(vma))
72817+ goto Einval;
72818+#endif
72819+
72820 /* We can't remap across vm area boundaries */
72821 if (old_len > vma->vm_end - addr)
72822 goto Efault;
72823@@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
72824 unsigned long ret = -EINVAL;
72825 unsigned long charged = 0;
72826 unsigned long map_flags;
72827+ unsigned long pax_task_size = TASK_SIZE;
72828
72829 if (new_addr & ~PAGE_MASK)
72830 goto out;
72831
72832- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72833+#ifdef CONFIG_PAX_SEGMEXEC
72834+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
72835+ pax_task_size = SEGMEXEC_TASK_SIZE;
72836+#endif
72837+
72838+ pax_task_size -= PAGE_SIZE;
72839+
72840+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72841 goto out;
72842
72843 /* Check if the location we're moving into overlaps the
72844 * old location at all, and fail if it does.
72845 */
72846- if ((new_addr <= addr) && (new_addr+new_len) > addr)
72847- goto out;
72848-
72849- if ((addr <= new_addr) && (addr+old_len) > new_addr)
72850+ if (addr + old_len > new_addr && new_addr + new_len > addr)
72851 goto out;
72852
72853 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72854@@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
72855 struct vm_area_struct *vma;
72856 unsigned long ret = -EINVAL;
72857 unsigned long charged = 0;
72858+ unsigned long pax_task_size = TASK_SIZE;
72859
72860 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72861 goto out;
72862@@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
72863 if (!new_len)
72864 goto out;
72865
72866+#ifdef CONFIG_PAX_SEGMEXEC
72867+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
72868+ pax_task_size = SEGMEXEC_TASK_SIZE;
72869+#endif
72870+
72871+ pax_task_size -= PAGE_SIZE;
72872+
72873+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72874+ old_len > pax_task_size || addr > pax_task_size-old_len)
72875+ goto out;
72876+
72877 if (flags & MREMAP_FIXED) {
72878 if (flags & MREMAP_MAYMOVE)
72879 ret = mremap_to(addr, old_len, new_addr, new_len);
72880@@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
72881 addr + new_len);
72882 }
72883 ret = addr;
72884+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72885 goto out;
72886 }
72887 }
72888@@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
72889 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72890 if (ret)
72891 goto out;
72892+
72893+ map_flags = vma->vm_flags;
72894 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72895+ if (!(ret & ~PAGE_MASK)) {
72896+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72897+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72898+ }
72899 }
72900 out:
72901 if (ret & ~PAGE_MASK)
72902diff --git a/mm/nommu.c b/mm/nommu.c
72903index f59e170..34e2a2b 100644
72904--- a/mm/nommu.c
72905+++ b/mm/nommu.c
72906@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72907 int sysctl_overcommit_ratio = 50; /* default is 50% */
72908 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72909 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72910-int heap_stack_gap = 0;
72911
72912 atomic_long_t mmap_pages_allocated;
72913
72914@@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72915 EXPORT_SYMBOL(find_vma);
72916
72917 /*
72918- * find a VMA
72919- * - we don't extend stack VMAs under NOMMU conditions
72920- */
72921-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72922-{
72923- return find_vma(mm, addr);
72924-}
72925-
72926-/*
72927 * expand a stack to a given address
72928 * - not supported under NOMMU conditions
72929 */
72930@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72931
72932 /* most fields are the same, copy all, and then fixup */
72933 *new = *vma;
72934+ INIT_LIST_HEAD(&new->anon_vma_chain);
72935 *region = *vma->vm_region;
72936 new->vm_region = region;
72937
72938diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72939index a13ded1..b949d15 100644
72940--- a/mm/page_alloc.c
72941+++ b/mm/page_alloc.c
72942@@ -335,7 +335,7 @@ out:
72943 * This usage means that zero-order pages may not be compound.
72944 */
72945
72946-static void free_compound_page(struct page *page)
72947+void free_compound_page(struct page *page)
72948 {
72949 __free_pages_ok(page, compound_order(page));
72950 }
72951@@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72952 int i;
72953 int bad = 0;
72954
72955+#ifdef CONFIG_PAX_MEMORY_SANITIZE
72956+ unsigned long index = 1UL << order;
72957+#endif
72958+
72959 trace_mm_page_free(page, order);
72960 kmemcheck_free_shadow(page, order);
72961
72962@@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72963 debug_check_no_obj_freed(page_address(page),
72964 PAGE_SIZE << order);
72965 }
72966+
72967+#ifdef CONFIG_PAX_MEMORY_SANITIZE
72968+ for (; index; --index)
72969+ sanitize_highpage(page + index - 1);
72970+#endif
72971+
72972 arch_free_page(page, order);
72973 kernel_map_pages(page, 1 << order, 0);
72974
72975@@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
72976 arch_alloc_page(page, order);
72977 kernel_map_pages(page, 1 << order, 1);
72978
72979+#ifndef CONFIG_PAX_MEMORY_SANITIZE
72980 if (gfp_flags & __GFP_ZERO)
72981 prep_zero_page(page, order, gfp_flags);
72982+#endif
72983
72984 if (order && (gfp_flags & __GFP_COMP))
72985 prep_compound_page(page, order);
72986@@ -3468,7 +3480,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
72987 unsigned long pfn;
72988
72989 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
72990+#ifdef CONFIG_X86_32
72991+ /* boot failures in VMware 8 on 32bit vanilla since
72992+ this change */
72993+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
72994+#else
72995 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
72996+#endif
72997 return 1;
72998 }
72999 return 0;
73000diff --git a/mm/percpu.c b/mm/percpu.c
73001index f47af91..7eeef99 100644
73002--- a/mm/percpu.c
73003+++ b/mm/percpu.c
73004@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
73005 static unsigned int pcpu_high_unit_cpu __read_mostly;
73006
73007 /* the address of the first chunk which starts with the kernel static area */
73008-void *pcpu_base_addr __read_mostly;
73009+void *pcpu_base_addr __read_only;
73010 EXPORT_SYMBOL_GPL(pcpu_base_addr);
73011
73012 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
73013diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
73014index c20ff48..137702a 100644
73015--- a/mm/process_vm_access.c
73016+++ b/mm/process_vm_access.c
73017@@ -13,6 +13,7 @@
73018 #include <linux/uio.h>
73019 #include <linux/sched.h>
73020 #include <linux/highmem.h>
73021+#include <linux/security.h>
73022 #include <linux/ptrace.h>
73023 #include <linux/slab.h>
73024 #include <linux/syscalls.h>
73025@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
73026 size_t iov_l_curr_offset = 0;
73027 ssize_t iov_len;
73028
73029+ return -ENOSYS; // PaX: until properly audited
73030+
73031 /*
73032 * Work out how many pages of struct pages we're going to need
73033 * when eventually calling get_user_pages
73034 */
73035 for (i = 0; i < riovcnt; i++) {
73036 iov_len = rvec[i].iov_len;
73037- if (iov_len > 0) {
73038- nr_pages_iov = ((unsigned long)rvec[i].iov_base
73039- + iov_len)
73040- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
73041- / PAGE_SIZE + 1;
73042- nr_pages = max(nr_pages, nr_pages_iov);
73043- }
73044+ if (iov_len <= 0)
73045+ continue;
73046+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
73047+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
73048+ nr_pages = max(nr_pages, nr_pages_iov);
73049 }
73050
73051 if (nr_pages == 0)
73052@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
73053 goto free_proc_pages;
73054 }
73055
73056+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
73057+ rc = -EPERM;
73058+ goto put_task_struct;
73059+ }
73060+
73061 mm = mm_access(task, PTRACE_MODE_ATTACH);
73062 if (!mm || IS_ERR(mm)) {
73063 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
73064diff --git a/mm/rmap.c b/mm/rmap.c
73065index c8454e0..b04f3a2 100644
73066--- a/mm/rmap.c
73067+++ b/mm/rmap.c
73068@@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73069 struct anon_vma *anon_vma = vma->anon_vma;
73070 struct anon_vma_chain *avc;
73071
73072+#ifdef CONFIG_PAX_SEGMEXEC
73073+ struct anon_vma_chain *avc_m = NULL;
73074+#endif
73075+
73076 might_sleep();
73077 if (unlikely(!anon_vma)) {
73078 struct mm_struct *mm = vma->vm_mm;
73079@@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73080 if (!avc)
73081 goto out_enomem;
73082
73083+#ifdef CONFIG_PAX_SEGMEXEC
73084+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
73085+ if (!avc_m)
73086+ goto out_enomem_free_avc;
73087+#endif
73088+
73089 anon_vma = find_mergeable_anon_vma(vma);
73090 allocated = NULL;
73091 if (!anon_vma) {
73092@@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73093 /* page_table_lock to protect against threads */
73094 spin_lock(&mm->page_table_lock);
73095 if (likely(!vma->anon_vma)) {
73096+
73097+#ifdef CONFIG_PAX_SEGMEXEC
73098+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
73099+
73100+ if (vma_m) {
73101+ BUG_ON(vma_m->anon_vma);
73102+ vma_m->anon_vma = anon_vma;
73103+ avc_m->anon_vma = anon_vma;
73104+ avc_m->vma = vma;
73105+ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
73106+ list_add(&avc_m->same_anon_vma, &anon_vma->head);
73107+ avc_m = NULL;
73108+ }
73109+#endif
73110+
73111 vma->anon_vma = anon_vma;
73112 avc->anon_vma = anon_vma;
73113 avc->vma = vma;
73114@@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73115
73116 if (unlikely(allocated))
73117 put_anon_vma(allocated);
73118+
73119+#ifdef CONFIG_PAX_SEGMEXEC
73120+ if (unlikely(avc_m))
73121+ anon_vma_chain_free(avc_m);
73122+#endif
73123+
73124 if (unlikely(avc))
73125 anon_vma_chain_free(avc);
73126 }
73127 return 0;
73128
73129 out_enomem_free_avc:
73130+
73131+#ifdef CONFIG_PAX_SEGMEXEC
73132+ if (avc_m)
73133+ anon_vma_chain_free(avc_m);
73134+#endif
73135+
73136 anon_vma_chain_free(avc);
73137 out_enomem:
73138 return -ENOMEM;
73139@@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
73140 * Attach the anon_vmas from src to dst.
73141 * Returns 0 on success, -ENOMEM on failure.
73142 */
73143-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
73144+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
73145 {
73146 struct anon_vma_chain *avc, *pavc;
73147 struct anon_vma *root = NULL;
73148@@ -321,7 +358,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
73149 * the corresponding VMA in the parent process is attached to.
73150 * Returns 0 on success, non-zero on failure.
73151 */
73152-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
73153+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
73154 {
73155 struct anon_vma_chain *avc;
73156 struct anon_vma *anon_vma;
73157diff --git a/mm/shmem.c b/mm/shmem.c
73158index 269d049..a9d2b50 100644
73159--- a/mm/shmem.c
73160+++ b/mm/shmem.c
73161@@ -31,7 +31,7 @@
73162 #include <linux/export.h>
73163 #include <linux/swap.h>
73164
73165-static struct vfsmount *shm_mnt;
73166+struct vfsmount *shm_mnt;
73167
73168 #ifdef CONFIG_SHMEM
73169 /*
73170@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
73171 #define BOGO_DIRENT_SIZE 20
73172
73173 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
73174-#define SHORT_SYMLINK_LEN 128
73175+#define SHORT_SYMLINK_LEN 64
73176
73177 struct shmem_xattr {
73178 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
73179@@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
73180 int err = -ENOMEM;
73181
73182 /* Round up to L1_CACHE_BYTES to resist false sharing */
73183- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
73184- L1_CACHE_BYTES), GFP_KERNEL);
73185+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
73186 if (!sbinfo)
73187 return -ENOMEM;
73188
73189diff --git a/mm/slab.c b/mm/slab.c
73190index f0bd785..348b96a 100644
73191--- a/mm/slab.c
73192+++ b/mm/slab.c
73193@@ -153,7 +153,7 @@
73194
73195 /* Legal flag mask for kmem_cache_create(). */
73196 #if DEBUG
73197-# define CREATE_MASK (SLAB_RED_ZONE | \
73198+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
73199 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
73200 SLAB_CACHE_DMA | \
73201 SLAB_STORE_USER | \
73202@@ -161,7 +161,7 @@
73203 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73204 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
73205 #else
73206-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
73207+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
73208 SLAB_CACHE_DMA | \
73209 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
73210 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73211@@ -290,7 +290,7 @@ struct kmem_list3 {
73212 * Need this for bootstrapping a per node allocator.
73213 */
73214 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
73215-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
73216+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
73217 #define CACHE_CACHE 0
73218 #define SIZE_AC MAX_NUMNODES
73219 #define SIZE_L3 (2 * MAX_NUMNODES)
73220@@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
73221 if ((x)->max_freeable < i) \
73222 (x)->max_freeable = i; \
73223 } while (0)
73224-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
73225-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
73226-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
73227-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
73228+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
73229+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
73230+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
73231+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
73232 #else
73233 #define STATS_INC_ACTIVE(x) do { } while (0)
73234 #define STATS_DEC_ACTIVE(x) do { } while (0)
73235@@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
73236 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
73237 */
73238 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
73239- const struct slab *slab, void *obj)
73240+ const struct slab *slab, const void *obj)
73241 {
73242 u32 offset = (obj - slab->s_mem);
73243 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
73244@@ -568,7 +568,7 @@ struct cache_names {
73245 static struct cache_names __initdata cache_names[] = {
73246 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
73247 #include <linux/kmalloc_sizes.h>
73248- {NULL,}
73249+ {NULL}
73250 #undef CACHE
73251 };
73252
73253@@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
73254 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
73255 sizes[INDEX_AC].cs_size,
73256 ARCH_KMALLOC_MINALIGN,
73257- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73258+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73259 NULL);
73260
73261 if (INDEX_AC != INDEX_L3) {
73262@@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
73263 kmem_cache_create(names[INDEX_L3].name,
73264 sizes[INDEX_L3].cs_size,
73265 ARCH_KMALLOC_MINALIGN,
73266- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73267+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73268 NULL);
73269 }
73270
73271@@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
73272 sizes->cs_cachep = kmem_cache_create(names->name,
73273 sizes->cs_size,
73274 ARCH_KMALLOC_MINALIGN,
73275- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73276+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73277 NULL);
73278 }
73279 #ifdef CONFIG_ZONE_DMA
73280@@ -4339,10 +4339,10 @@ static int s_show(struct seq_file *m, void *p)
73281 }
73282 /* cpu stats */
73283 {
73284- unsigned long allochit = atomic_read(&cachep->allochit);
73285- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
73286- unsigned long freehit = atomic_read(&cachep->freehit);
73287- unsigned long freemiss = atomic_read(&cachep->freemiss);
73288+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
73289+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
73290+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
73291+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
73292
73293 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
73294 allochit, allocmiss, freehit, freemiss);
73295@@ -4601,13 +4601,62 @@ static int __init slab_proc_init(void)
73296 {
73297 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
73298 #ifdef CONFIG_DEBUG_SLAB_LEAK
73299- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
73300+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
73301 #endif
73302 return 0;
73303 }
73304 module_init(slab_proc_init);
73305 #endif
73306
73307+void check_object_size(const void *ptr, unsigned long n, bool to)
73308+{
73309+
73310+#ifdef CONFIG_PAX_USERCOPY
73311+ struct page *page;
73312+ struct kmem_cache *cachep = NULL;
73313+ struct slab *slabp;
73314+ unsigned int objnr;
73315+ unsigned long offset;
73316+ const char *type;
73317+
73318+ if (!n)
73319+ return;
73320+
73321+ type = "<null>";
73322+ if (ZERO_OR_NULL_PTR(ptr))
73323+ goto report;
73324+
73325+ if (!virt_addr_valid(ptr))
73326+ return;
73327+
73328+ page = virt_to_head_page(ptr);
73329+
73330+ type = "<process stack>";
73331+ if (!PageSlab(page)) {
73332+ if (object_is_on_stack(ptr, n) == -1)
73333+ goto report;
73334+ return;
73335+ }
73336+
73337+ cachep = page_get_cache(page);
73338+ type = cachep->name;
73339+ if (!(cachep->flags & SLAB_USERCOPY))
73340+ goto report;
73341+
73342+ slabp = page_get_slab(page);
73343+ objnr = obj_to_index(cachep, slabp, ptr);
73344+ BUG_ON(objnr >= cachep->num);
73345+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
73346+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
73347+ return;
73348+
73349+report:
73350+ pax_report_usercopy(ptr, n, to, type);
73351+#endif
73352+
73353+}
73354+EXPORT_SYMBOL(check_object_size);
73355+
73356 /**
73357 * ksize - get the actual amount of memory allocated for a given object
73358 * @objp: Pointer to the object
73359diff --git a/mm/slob.c b/mm/slob.c
73360index 8105be4..e045f96 100644
73361--- a/mm/slob.c
73362+++ b/mm/slob.c
73363@@ -29,7 +29,7 @@
73364 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
73365 * alloc_pages() directly, allocating compound pages so the page order
73366 * does not have to be separately tracked, and also stores the exact
73367- * allocation size in page->private so that it can be used to accurately
73368+ * allocation size in slob_page->size so that it can be used to accurately
73369 * provide ksize(). These objects are detected in kfree() because slob_page()
73370 * is false for them.
73371 *
73372@@ -58,6 +58,7 @@
73373 */
73374
73375 #include <linux/kernel.h>
73376+#include <linux/sched.h>
73377 #include <linux/slab.h>
73378 #include <linux/mm.h>
73379 #include <linux/swap.h> /* struct reclaim_state */
73380@@ -102,7 +103,8 @@ struct slob_page {
73381 unsigned long flags; /* mandatory */
73382 atomic_t _count; /* mandatory */
73383 slobidx_t units; /* free units left in page */
73384- unsigned long pad[2];
73385+ unsigned long pad[1];
73386+ unsigned long size; /* size when >=PAGE_SIZE */
73387 slob_t *free; /* first free slob_t in page */
73388 struct list_head list; /* linked list of free pages */
73389 };
73390@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
73391 */
73392 static inline int is_slob_page(struct slob_page *sp)
73393 {
73394- return PageSlab((struct page *)sp);
73395+ return PageSlab((struct page *)sp) && !sp->size;
73396 }
73397
73398 static inline void set_slob_page(struct slob_page *sp)
73399@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
73400
73401 static inline struct slob_page *slob_page(const void *addr)
73402 {
73403- return (struct slob_page *)virt_to_page(addr);
73404+ return (struct slob_page *)virt_to_head_page(addr);
73405 }
73406
73407 /*
73408@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
73409 /*
73410 * Return the size of a slob block.
73411 */
73412-static slobidx_t slob_units(slob_t *s)
73413+static slobidx_t slob_units(const slob_t *s)
73414 {
73415 if (s->units > 0)
73416 return s->units;
73417@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
73418 /*
73419 * Return the next free slob block pointer after this one.
73420 */
73421-static slob_t *slob_next(slob_t *s)
73422+static slob_t *slob_next(const slob_t *s)
73423 {
73424 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
73425 slobidx_t next;
73426@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
73427 /*
73428 * Returns true if s is the last free block in its page.
73429 */
73430-static int slob_last(slob_t *s)
73431+static int slob_last(const slob_t *s)
73432 {
73433 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
73434 }
73435@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
73436 if (!page)
73437 return NULL;
73438
73439+ set_slob_page(page);
73440 return page_address(page);
73441 }
73442
73443@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
73444 if (!b)
73445 return NULL;
73446 sp = slob_page(b);
73447- set_slob_page(sp);
73448
73449 spin_lock_irqsave(&slob_lock, flags);
73450 sp->units = SLOB_UNITS(PAGE_SIZE);
73451 sp->free = b;
73452+ sp->size = 0;
73453 INIT_LIST_HEAD(&sp->list);
73454 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
73455 set_slob_page_free(sp, slob_list);
73456@@ -476,10 +479,9 @@ out:
73457 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
73458 */
73459
73460-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73461+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
73462 {
73463- unsigned int *m;
73464- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73465+ slob_t *m;
73466 void *ret;
73467
73468 gfp &= gfp_allowed_mask;
73469@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73470
73471 if (!m)
73472 return NULL;
73473- *m = size;
73474+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
73475+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
73476+ m[0].units = size;
73477+ m[1].units = align;
73478 ret = (void *)m + align;
73479
73480 trace_kmalloc_node(_RET_IP_, ret,
73481@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73482 gfp |= __GFP_COMP;
73483 ret = slob_new_pages(gfp, order, node);
73484 if (ret) {
73485- struct page *page;
73486- page = virt_to_page(ret);
73487- page->private = size;
73488+ struct slob_page *sp;
73489+ sp = slob_page(ret);
73490+ sp->size = size;
73491 }
73492
73493 trace_kmalloc_node(_RET_IP_, ret,
73494 size, PAGE_SIZE << order, gfp, node);
73495 }
73496
73497- kmemleak_alloc(ret, size, 1, gfp);
73498+ return ret;
73499+}
73500+
73501+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73502+{
73503+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73504+ void *ret = __kmalloc_node_align(size, gfp, node, align);
73505+
73506+ if (!ZERO_OR_NULL_PTR(ret))
73507+ kmemleak_alloc(ret, size, 1, gfp);
73508 return ret;
73509 }
73510 EXPORT_SYMBOL(__kmalloc_node);
73511@@ -533,13 +547,92 @@ void kfree(const void *block)
73512 sp = slob_page(block);
73513 if (is_slob_page(sp)) {
73514 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73515- unsigned int *m = (unsigned int *)(block - align);
73516- slob_free(m, *m + align);
73517- } else
73518+ slob_t *m = (slob_t *)(block - align);
73519+ slob_free(m, m[0].units + align);
73520+ } else {
73521+ clear_slob_page(sp);
73522+ free_slob_page(sp);
73523+ sp->size = 0;
73524 put_page(&sp->page);
73525+ }
73526 }
73527 EXPORT_SYMBOL(kfree);
73528
73529+void check_object_size(const void *ptr, unsigned long n, bool to)
73530+{
73531+
73532+#ifdef CONFIG_PAX_USERCOPY
73533+ struct slob_page *sp;
73534+ const slob_t *free;
73535+ const void *base;
73536+ unsigned long flags;
73537+ const char *type;
73538+
73539+ if (!n)
73540+ return;
73541+
73542+ type = "<null>";
73543+ if (ZERO_OR_NULL_PTR(ptr))
73544+ goto report;
73545+
73546+ if (!virt_addr_valid(ptr))
73547+ return;
73548+
73549+ type = "<process stack>";
73550+ sp = slob_page(ptr);
73551+ if (!PageSlab((struct page *)sp)) {
73552+ if (object_is_on_stack(ptr, n) == -1)
73553+ goto report;
73554+ return;
73555+ }
73556+
73557+ type = "<slob>";
73558+ if (sp->size) {
73559+ base = page_address(&sp->page);
73560+ if (base <= ptr && n <= sp->size - (ptr - base))
73561+ return;
73562+ goto report;
73563+ }
73564+
73565+ /* some tricky double walking to find the chunk */
73566+ spin_lock_irqsave(&slob_lock, flags);
73567+ base = (void *)((unsigned long)ptr & PAGE_MASK);
73568+ free = sp->free;
73569+
73570+ while (!slob_last(free) && (void *)free <= ptr) {
73571+ base = free + slob_units(free);
73572+ free = slob_next(free);
73573+ }
73574+
73575+ while (base < (void *)free) {
73576+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
73577+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
73578+ int offset;
73579+
73580+ if (ptr < base + align)
73581+ break;
73582+
73583+ offset = ptr - base - align;
73584+ if (offset >= m) {
73585+ base += size;
73586+ continue;
73587+ }
73588+
73589+ if (n > m - offset)
73590+ break;
73591+
73592+ spin_unlock_irqrestore(&slob_lock, flags);
73593+ return;
73594+ }
73595+
73596+ spin_unlock_irqrestore(&slob_lock, flags);
73597+report:
73598+ pax_report_usercopy(ptr, n, to, type);
73599+#endif
73600+
73601+}
73602+EXPORT_SYMBOL(check_object_size);
73603+
73604 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
73605 size_t ksize(const void *block)
73606 {
73607@@ -552,10 +645,10 @@ size_t ksize(const void *block)
73608 sp = slob_page(block);
73609 if (is_slob_page(sp)) {
73610 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73611- unsigned int *m = (unsigned int *)(block - align);
73612- return SLOB_UNITS(*m) * SLOB_UNIT;
73613+ slob_t *m = (slob_t *)(block - align);
73614+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
73615 } else
73616- return sp->page.private;
73617+ return sp->size;
73618 }
73619 EXPORT_SYMBOL(ksize);
73620
73621@@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73622 {
73623 struct kmem_cache *c;
73624
73625+#ifdef CONFIG_PAX_USERCOPY
73626+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
73627+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
73628+#else
73629 c = slob_alloc(sizeof(struct kmem_cache),
73630 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
73631+#endif
73632
73633 if (c) {
73634 c->name = name;
73635@@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
73636
73637 lockdep_trace_alloc(flags);
73638
73639+#ifdef CONFIG_PAX_USERCOPY
73640+ b = __kmalloc_node_align(c->size, flags, node, c->align);
73641+#else
73642 if (c->size < PAGE_SIZE) {
73643 b = slob_alloc(c->size, flags, c->align, node);
73644 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73645 SLOB_UNITS(c->size) * SLOB_UNIT,
73646 flags, node);
73647 } else {
73648+ struct slob_page *sp;
73649+
73650 b = slob_new_pages(flags, get_order(c->size), node);
73651+ sp = slob_page(b);
73652+ sp->size = c->size;
73653 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73654 PAGE_SIZE << get_order(c->size),
73655 flags, node);
73656 }
73657+#endif
73658
73659 if (c->ctor)
73660 c->ctor(b);
73661@@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
73662
73663 static void __kmem_cache_free(void *b, int size)
73664 {
73665- if (size < PAGE_SIZE)
73666+ struct slob_page *sp = slob_page(b);
73667+
73668+ if (is_slob_page(sp))
73669 slob_free(b, size);
73670- else
73671+ else {
73672+ clear_slob_page(sp);
73673+ free_slob_page(sp);
73674+ sp->size = 0;
73675 slob_free_pages(b, get_order(size));
73676+ }
73677 }
73678
73679 static void kmem_rcu_free(struct rcu_head *head)
73680@@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
73681
73682 void kmem_cache_free(struct kmem_cache *c, void *b)
73683 {
73684+ int size = c->size;
73685+
73686+#ifdef CONFIG_PAX_USERCOPY
73687+ if (size + c->align < PAGE_SIZE) {
73688+ size += c->align;
73689+ b -= c->align;
73690+ }
73691+#endif
73692+
73693 kmemleak_free_recursive(b, c->flags);
73694 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
73695 struct slob_rcu *slob_rcu;
73696- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
73697- slob_rcu->size = c->size;
73698+ slob_rcu = b + (size - sizeof(struct slob_rcu));
73699+ slob_rcu->size = size;
73700 call_rcu(&slob_rcu->head, kmem_rcu_free);
73701 } else {
73702- __kmem_cache_free(b, c->size);
73703+ __kmem_cache_free(b, size);
73704 }
73705
73706+#ifdef CONFIG_PAX_USERCOPY
73707+ trace_kfree(_RET_IP_, b);
73708+#else
73709 trace_kmem_cache_free(_RET_IP_, b);
73710+#endif
73711+
73712 }
73713 EXPORT_SYMBOL(kmem_cache_free);
73714
73715diff --git a/mm/slub.c b/mm/slub.c
73716index 0342a5d..8180ae9 100644
73717--- a/mm/slub.c
73718+++ b/mm/slub.c
73719@@ -208,7 +208,7 @@ struct track {
73720
73721 enum track_item { TRACK_ALLOC, TRACK_FREE };
73722
73723-#ifdef CONFIG_SYSFS
73724+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73725 static int sysfs_slab_add(struct kmem_cache *);
73726 static int sysfs_slab_alias(struct kmem_cache *, const char *);
73727 static void sysfs_slab_remove(struct kmem_cache *);
73728@@ -532,7 +532,7 @@ static void print_track(const char *s, struct track *t)
73729 if (!t->addr)
73730 return;
73731
73732- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
73733+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
73734 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
73735 #ifdef CONFIG_STACKTRACE
73736 {
73737@@ -2571,6 +2571,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
73738
73739 page = virt_to_head_page(x);
73740
73741+ BUG_ON(!PageSlab(page));
73742+
73743 slab_free(s, page, x, _RET_IP_);
73744
73745 trace_kmem_cache_free(_RET_IP_, x);
73746@@ -2604,7 +2606,7 @@ static int slub_min_objects;
73747 * Merge control. If this is set then no merging of slab caches will occur.
73748 * (Could be removed. This was introduced to pacify the merge skeptics.)
73749 */
73750-static int slub_nomerge;
73751+static int slub_nomerge = 1;
73752
73753 /*
73754 * Calculate the order of allocation given an slab object size.
73755@@ -3057,7 +3059,7 @@ static int kmem_cache_open(struct kmem_cache *s,
73756 else
73757 s->cpu_partial = 30;
73758
73759- s->refcount = 1;
73760+ atomic_set(&s->refcount, 1);
73761 #ifdef CONFIG_NUMA
73762 s->remote_node_defrag_ratio = 1000;
73763 #endif
73764@@ -3161,8 +3163,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
73765 void kmem_cache_destroy(struct kmem_cache *s)
73766 {
73767 down_write(&slub_lock);
73768- s->refcount--;
73769- if (!s->refcount) {
73770+ if (atomic_dec_and_test(&s->refcount)) {
73771 list_del(&s->list);
73772 up_write(&slub_lock);
73773 if (kmem_cache_close(s)) {
73774@@ -3373,6 +3374,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
73775 EXPORT_SYMBOL(__kmalloc_node);
73776 #endif
73777
73778+void check_object_size(const void *ptr, unsigned long n, bool to)
73779+{
73780+
73781+#ifdef CONFIG_PAX_USERCOPY
73782+ struct page *page;
73783+ struct kmem_cache *s = NULL;
73784+ unsigned long offset;
73785+ const char *type;
73786+
73787+ if (!n)
73788+ return;
73789+
73790+ type = "<null>";
73791+ if (ZERO_OR_NULL_PTR(ptr))
73792+ goto report;
73793+
73794+ if (!virt_addr_valid(ptr))
73795+ return;
73796+
73797+ page = virt_to_head_page(ptr);
73798+
73799+ type = "<process stack>";
73800+ if (!PageSlab(page)) {
73801+ if (object_is_on_stack(ptr, n) == -1)
73802+ goto report;
73803+ return;
73804+ }
73805+
73806+ s = page->slab;
73807+ type = s->name;
73808+ if (!(s->flags & SLAB_USERCOPY))
73809+ goto report;
73810+
73811+ offset = (ptr - page_address(page)) % s->size;
73812+ if (offset <= s->objsize && n <= s->objsize - offset)
73813+ return;
73814+
73815+report:
73816+ pax_report_usercopy(ptr, n, to, type);
73817+#endif
73818+
73819+}
73820+EXPORT_SYMBOL(check_object_size);
73821+
73822 size_t ksize(const void *object)
73823 {
73824 struct page *page;
73825@@ -3647,7 +3692,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73826 int node;
73827
73828 list_add(&s->list, &slab_caches);
73829- s->refcount = -1;
73830+ atomic_set(&s->refcount, -1);
73831
73832 for_each_node_state(node, N_NORMAL_MEMORY) {
73833 struct kmem_cache_node *n = get_node(s, node);
73834@@ -3767,17 +3812,17 @@ void __init kmem_cache_init(void)
73835
73836 /* Caches that are not of the two-to-the-power-of size */
73837 if (KMALLOC_MIN_SIZE <= 32) {
73838- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73839+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73840 caches++;
73841 }
73842
73843 if (KMALLOC_MIN_SIZE <= 64) {
73844- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73845+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73846 caches++;
73847 }
73848
73849 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73850- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73851+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73852 caches++;
73853 }
73854
73855@@ -3845,7 +3890,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73856 /*
73857 * We may have set a slab to be unmergeable during bootstrap.
73858 */
73859- if (s->refcount < 0)
73860+ if (atomic_read(&s->refcount) < 0)
73861 return 1;
73862
73863 return 0;
73864@@ -3904,7 +3949,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73865 down_write(&slub_lock);
73866 s = find_mergeable(size, align, flags, name, ctor);
73867 if (s) {
73868- s->refcount++;
73869+ atomic_inc(&s->refcount);
73870 /*
73871 * Adjust the object sizes so that we clear
73872 * the complete object on kzalloc.
73873@@ -3913,7 +3958,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73874 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73875
73876 if (sysfs_slab_alias(s, name)) {
73877- s->refcount--;
73878+ atomic_dec(&s->refcount);
73879 goto err;
73880 }
73881 up_write(&slub_lock);
73882@@ -4042,7 +4087,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
73883 }
73884 #endif
73885
73886-#ifdef CONFIG_SYSFS
73887+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73888 static int count_inuse(struct page *page)
73889 {
73890 return page->inuse;
73891@@ -4429,12 +4474,12 @@ static void resiliency_test(void)
73892 validate_slab_cache(kmalloc_caches[9]);
73893 }
73894 #else
73895-#ifdef CONFIG_SYSFS
73896+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73897 static void resiliency_test(void) {};
73898 #endif
73899 #endif
73900
73901-#ifdef CONFIG_SYSFS
73902+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73903 enum slab_stat_type {
73904 SL_ALL, /* All slabs */
73905 SL_PARTIAL, /* Only partially allocated slabs */
73906@@ -4677,7 +4722,7 @@ SLAB_ATTR_RO(ctor);
73907
73908 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73909 {
73910- return sprintf(buf, "%d\n", s->refcount - 1);
73911+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73912 }
73913 SLAB_ATTR_RO(aliases);
73914
73915@@ -5244,6 +5289,7 @@ static char *create_unique_id(struct kmem_cache *s)
73916 return name;
73917 }
73918
73919+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73920 static int sysfs_slab_add(struct kmem_cache *s)
73921 {
73922 int err;
73923@@ -5306,6 +5352,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
73924 kobject_del(&s->kobj);
73925 kobject_put(&s->kobj);
73926 }
73927+#endif
73928
73929 /*
73930 * Need to buffer aliases during bootup until sysfs becomes
73931@@ -5319,6 +5366,7 @@ struct saved_alias {
73932
73933 static struct saved_alias *alias_list;
73934
73935+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73936 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73937 {
73938 struct saved_alias *al;
73939@@ -5341,6 +5389,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73940 alias_list = al;
73941 return 0;
73942 }
73943+#endif
73944
73945 static int __init slab_sysfs_init(void)
73946 {
73947diff --git a/mm/swap.c b/mm/swap.c
73948index 14380e9..e244704 100644
73949--- a/mm/swap.c
73950+++ b/mm/swap.c
73951@@ -30,6 +30,7 @@
73952 #include <linux/backing-dev.h>
73953 #include <linux/memcontrol.h>
73954 #include <linux/gfp.h>
73955+#include <linux/hugetlb.h>
73956
73957 #include "internal.h"
73958
73959@@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
73960
73961 __page_cache_release(page);
73962 dtor = get_compound_page_dtor(page);
73963+ if (!PageHuge(page))
73964+ BUG_ON(dtor != free_compound_page);
73965 (*dtor)(page);
73966 }
73967
73968diff --git a/mm/swapfile.c b/mm/swapfile.c
73969index f31b29d..8bdcae2 100644
73970--- a/mm/swapfile.c
73971+++ b/mm/swapfile.c
73972@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
73973
73974 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
73975 /* Activity counter to indicate that a swapon or swapoff has occurred */
73976-static atomic_t proc_poll_event = ATOMIC_INIT(0);
73977+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
73978
73979 static inline unsigned char swap_count(unsigned char ent)
73980 {
73981@@ -1669,7 +1669,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
73982 }
73983 filp_close(swap_file, NULL);
73984 err = 0;
73985- atomic_inc(&proc_poll_event);
73986+ atomic_inc_unchecked(&proc_poll_event);
73987 wake_up_interruptible(&proc_poll_wait);
73988
73989 out_dput:
73990@@ -1685,8 +1685,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
73991
73992 poll_wait(file, &proc_poll_wait, wait);
73993
73994- if (seq->poll_event != atomic_read(&proc_poll_event)) {
73995- seq->poll_event = atomic_read(&proc_poll_event);
73996+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
73997+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73998 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
73999 }
74000
74001@@ -1784,7 +1784,7 @@ static int swaps_open(struct inode *inode, struct file *file)
74002 return ret;
74003
74004 seq = file->private_data;
74005- seq->poll_event = atomic_read(&proc_poll_event);
74006+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74007 return 0;
74008 }
74009
74010@@ -2122,7 +2122,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
74011 (p->flags & SWP_DISCARDABLE) ? "D" : "");
74012
74013 mutex_unlock(&swapon_mutex);
74014- atomic_inc(&proc_poll_event);
74015+ atomic_inc_unchecked(&proc_poll_event);
74016 wake_up_interruptible(&proc_poll_wait);
74017
74018 if (S_ISREG(inode->i_mode))
74019diff --git a/mm/util.c b/mm/util.c
74020index 136ac4f..f917fa9 100644
74021--- a/mm/util.c
74022+++ b/mm/util.c
74023@@ -243,6 +243,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
74024 void arch_pick_mmap_layout(struct mm_struct *mm)
74025 {
74026 mm->mmap_base = TASK_UNMAPPED_BASE;
74027+
74028+#ifdef CONFIG_PAX_RANDMMAP
74029+ if (mm->pax_flags & MF_PAX_RANDMMAP)
74030+ mm->mmap_base += mm->delta_mmap;
74031+#endif
74032+
74033 mm->get_unmapped_area = arch_get_unmapped_area;
74034 mm->unmap_area = arch_unmap_area;
74035 }
74036diff --git a/mm/vmalloc.c b/mm/vmalloc.c
74037index 86ce9a5..bc498f3 100644
74038--- a/mm/vmalloc.c
74039+++ b/mm/vmalloc.c
74040@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
74041
74042 pte = pte_offset_kernel(pmd, addr);
74043 do {
74044- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74045- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74046+
74047+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74048+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
74049+ BUG_ON(!pte_exec(*pte));
74050+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
74051+ continue;
74052+ }
74053+#endif
74054+
74055+ {
74056+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74057+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74058+ }
74059 } while (pte++, addr += PAGE_SIZE, addr != end);
74060 }
74061
74062@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74063 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
74064 {
74065 pte_t *pte;
74066+ int ret = -ENOMEM;
74067
74068 /*
74069 * nr is a running index into the array which helps higher level
74070@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74071 pte = pte_alloc_kernel(pmd, addr);
74072 if (!pte)
74073 return -ENOMEM;
74074+
74075+ pax_open_kernel();
74076 do {
74077 struct page *page = pages[*nr];
74078
74079- if (WARN_ON(!pte_none(*pte)))
74080- return -EBUSY;
74081- if (WARN_ON(!page))
74082- return -ENOMEM;
74083+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74084+ if (pgprot_val(prot) & _PAGE_NX)
74085+#endif
74086+
74087+ if (WARN_ON(!pte_none(*pte))) {
74088+ ret = -EBUSY;
74089+ goto out;
74090+ }
74091+ if (WARN_ON(!page)) {
74092+ ret = -ENOMEM;
74093+ goto out;
74094+ }
74095 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
74096 (*nr)++;
74097 } while (pte++, addr += PAGE_SIZE, addr != end);
74098- return 0;
74099+ ret = 0;
74100+out:
74101+ pax_close_kernel();
74102+ return ret;
74103 }
74104
74105 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74106@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
74107 * and fall back on vmalloc() if that fails. Others
74108 * just put it in the vmalloc space.
74109 */
74110-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
74111+#ifdef CONFIG_MODULES
74112+#ifdef MODULES_VADDR
74113 unsigned long addr = (unsigned long)x;
74114 if (addr >= MODULES_VADDR && addr < MODULES_END)
74115 return 1;
74116 #endif
74117+
74118+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74119+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
74120+ return 1;
74121+#endif
74122+
74123+#endif
74124+
74125 return is_vmalloc_addr(x);
74126 }
74127
74128@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
74129
74130 if (!pgd_none(*pgd)) {
74131 pud_t *pud = pud_offset(pgd, addr);
74132+#ifdef CONFIG_X86
74133+ if (!pud_large(*pud))
74134+#endif
74135 if (!pud_none(*pud)) {
74136 pmd_t *pmd = pmd_offset(pud, addr);
74137+#ifdef CONFIG_X86
74138+ if (!pmd_large(*pmd))
74139+#endif
74140 if (!pmd_none(*pmd)) {
74141 pte_t *ptep, pte;
74142
74143@@ -1319,6 +1359,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
74144 struct vm_struct *area;
74145
74146 BUG_ON(in_interrupt());
74147+
74148+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74149+ if (flags & VM_KERNEXEC) {
74150+ if (start != VMALLOC_START || end != VMALLOC_END)
74151+ return NULL;
74152+ start = (unsigned long)MODULES_EXEC_VADDR;
74153+ end = (unsigned long)MODULES_EXEC_END;
74154+ }
74155+#endif
74156+
74157 if (flags & VM_IOREMAP) {
74158 int bit = fls(size);
74159
74160@@ -1551,6 +1601,11 @@ void *vmap(struct page **pages, unsigned int count,
74161 if (count > totalram_pages)
74162 return NULL;
74163
74164+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74165+ if (!(pgprot_val(prot) & _PAGE_NX))
74166+ flags |= VM_KERNEXEC;
74167+#endif
74168+
74169 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
74170 __builtin_return_address(0));
74171 if (!area)
74172@@ -1652,6 +1707,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
74173 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
74174 goto fail;
74175
74176+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74177+ if (!(pgprot_val(prot) & _PAGE_NX))
74178+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
74179+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
74180+ else
74181+#endif
74182+
74183 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
74184 start, end, node, gfp_mask, caller);
74185 if (!area)
74186@@ -1825,10 +1887,9 @@ EXPORT_SYMBOL(vzalloc_node);
74187 * For tight control over page level allocator and protection flags
74188 * use __vmalloc() instead.
74189 */
74190-
74191 void *vmalloc_exec(unsigned long size)
74192 {
74193- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
74194+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
74195 -1, __builtin_return_address(0));
74196 }
74197
74198@@ -2123,6 +2184,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
74199 unsigned long uaddr = vma->vm_start;
74200 unsigned long usize = vma->vm_end - vma->vm_start;
74201
74202+ BUG_ON(vma->vm_mirror);
74203+
74204 if ((PAGE_SIZE-1) & (unsigned long)addr)
74205 return -EINVAL;
74206
74207diff --git a/mm/vmstat.c b/mm/vmstat.c
74208index f600557..1459fc8 100644
74209--- a/mm/vmstat.c
74210+++ b/mm/vmstat.c
74211@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
74212 *
74213 * vm_stat contains the global counters
74214 */
74215-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74216+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74217 EXPORT_SYMBOL(vm_stat);
74218
74219 #ifdef CONFIG_SMP
74220@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
74221 v = p->vm_stat_diff[i];
74222 p->vm_stat_diff[i] = 0;
74223 local_irq_restore(flags);
74224- atomic_long_add(v, &zone->vm_stat[i]);
74225+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
74226 global_diff[i] += v;
74227 #ifdef CONFIG_NUMA
74228 /* 3 seconds idle till flush */
74229@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
74230
74231 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
74232 if (global_diff[i])
74233- atomic_long_add(global_diff[i], &vm_stat[i]);
74234+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
74235 }
74236
74237 #endif
74238@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
74239 start_cpu_timer(cpu);
74240 #endif
74241 #ifdef CONFIG_PROC_FS
74242- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
74243- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
74244- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
74245- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
74246+ {
74247+ mode_t gr_mode = S_IRUGO;
74248+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74249+ gr_mode = S_IRUSR;
74250+#endif
74251+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
74252+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
74253+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74254+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
74255+#else
74256+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
74257+#endif
74258+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
74259+ }
74260 #endif
74261 return 0;
74262 }
74263diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
74264index efea35b..9c8dd0b 100644
74265--- a/net/8021q/vlan.c
74266+++ b/net/8021q/vlan.c
74267@@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
74268 err = -EPERM;
74269 if (!capable(CAP_NET_ADMIN))
74270 break;
74271- if ((args.u.name_type >= 0) &&
74272- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
74273+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
74274 struct vlan_net *vn;
74275
74276 vn = net_generic(net, vlan_net_id);
74277diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
74278index fccae26..e7ece2f 100644
74279--- a/net/9p/trans_fd.c
74280+++ b/net/9p/trans_fd.c
74281@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
74282 oldfs = get_fs();
74283 set_fs(get_ds());
74284 /* The cast to a user pointer is valid due to the set_fs() */
74285- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
74286+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
74287 set_fs(oldfs);
74288
74289 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
74290diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
74291index 876fbe8..8bbea9f 100644
74292--- a/net/atm/atm_misc.c
74293+++ b/net/atm/atm_misc.c
74294@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
74295 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
74296 return 1;
74297 atm_return(vcc, truesize);
74298- atomic_inc(&vcc->stats->rx_drop);
74299+ atomic_inc_unchecked(&vcc->stats->rx_drop);
74300 return 0;
74301 }
74302 EXPORT_SYMBOL(atm_charge);
74303@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
74304 }
74305 }
74306 atm_return(vcc, guess);
74307- atomic_inc(&vcc->stats->rx_drop);
74308+ atomic_inc_unchecked(&vcc->stats->rx_drop);
74309 return NULL;
74310 }
74311 EXPORT_SYMBOL(atm_alloc_charge);
74312@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
74313
74314 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74315 {
74316-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74317+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74318 __SONET_ITEMS
74319 #undef __HANDLE_ITEM
74320 }
74321@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
74322
74323 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74324 {
74325-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74326+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
74327 __SONET_ITEMS
74328 #undef __HANDLE_ITEM
74329 }
74330diff --git a/net/atm/lec.h b/net/atm/lec.h
74331index dfc0719..47c5322 100644
74332--- a/net/atm/lec.h
74333+++ b/net/atm/lec.h
74334@@ -48,7 +48,7 @@ struct lane2_ops {
74335 const u8 *tlvs, u32 sizeoftlvs);
74336 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
74337 const u8 *tlvs, u32 sizeoftlvs);
74338-};
74339+} __no_const;
74340
74341 /*
74342 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
74343diff --git a/net/atm/mpc.h b/net/atm/mpc.h
74344index 0919a88..a23d54e 100644
74345--- a/net/atm/mpc.h
74346+++ b/net/atm/mpc.h
74347@@ -33,7 +33,7 @@ struct mpoa_client {
74348 struct mpc_parameters parameters; /* parameters for this client */
74349
74350 const struct net_device_ops *old_ops;
74351- struct net_device_ops new_ops;
74352+ net_device_ops_no_const new_ops;
74353 };
74354
74355
74356diff --git a/net/atm/proc.c b/net/atm/proc.c
74357index 0d020de..011c7bb 100644
74358--- a/net/atm/proc.c
74359+++ b/net/atm/proc.c
74360@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
74361 const struct k_atm_aal_stats *stats)
74362 {
74363 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
74364- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
74365- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
74366- atomic_read(&stats->rx_drop));
74367+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
74368+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
74369+ atomic_read_unchecked(&stats->rx_drop));
74370 }
74371
74372 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
74373diff --git a/net/atm/resources.c b/net/atm/resources.c
74374index 23f45ce..c748f1a 100644
74375--- a/net/atm/resources.c
74376+++ b/net/atm/resources.c
74377@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
74378 static void copy_aal_stats(struct k_atm_aal_stats *from,
74379 struct atm_aal_stats *to)
74380 {
74381-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74382+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74383 __AAL_STAT_ITEMS
74384 #undef __HANDLE_ITEM
74385 }
74386@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
74387 static void subtract_aal_stats(struct k_atm_aal_stats *from,
74388 struct atm_aal_stats *to)
74389 {
74390-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74391+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
74392 __AAL_STAT_ITEMS
74393 #undef __HANDLE_ITEM
74394 }
74395diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
74396index 3512e25..2b33401 100644
74397--- a/net/batman-adv/bat_iv_ogm.c
74398+++ b/net/batman-adv/bat_iv_ogm.c
74399@@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
74400
74401 /* change sequence number to network order */
74402 batman_ogm_packet->seqno =
74403- htonl((uint32_t)atomic_read(&hard_iface->seqno));
74404+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
74405
74406 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
74407 batman_ogm_packet->tt_crc = htons((uint16_t)
74408@@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
74409 else
74410 batman_ogm_packet->gw_flags = NO_FLAGS;
74411
74412- atomic_inc(&hard_iface->seqno);
74413+ atomic_inc_unchecked(&hard_iface->seqno);
74414
74415 slide_own_bcast_window(hard_iface);
74416 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
74417@@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
74418 return;
74419
74420 /* could be changed by schedule_own_packet() */
74421- if_incoming_seqno = atomic_read(&if_incoming->seqno);
74422+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
74423
74424 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
74425
74426diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
74427index 7704df4..beb4e16 100644
74428--- a/net/batman-adv/hard-interface.c
74429+++ b/net/batman-adv/hard-interface.c
74430@@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
74431 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
74432 dev_add_pack(&hard_iface->batman_adv_ptype);
74433
74434- atomic_set(&hard_iface->seqno, 1);
74435- atomic_set(&hard_iface->frag_seqno, 1);
74436+ atomic_set_unchecked(&hard_iface->seqno, 1);
74437+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
74438 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
74439 hard_iface->net_dev->name);
74440
74441diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
74442index 987c75a..20d6f36 100644
74443--- a/net/batman-adv/soft-interface.c
74444+++ b/net/batman-adv/soft-interface.c
74445@@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
74446
74447 /* set broadcast sequence number */
74448 bcast_packet->seqno =
74449- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
74450+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
74451
74452 add_bcast_packet_to_list(bat_priv, skb, 1);
74453
74454@@ -843,7 +843,7 @@ struct net_device *softif_create(const char *name)
74455 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
74456
74457 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
74458- atomic_set(&bat_priv->bcast_seqno, 1);
74459+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
74460 atomic_set(&bat_priv->ttvn, 0);
74461 atomic_set(&bat_priv->tt_local_changes, 0);
74462 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
74463diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
74464index e9eb043..d174eeb 100644
74465--- a/net/batman-adv/types.h
74466+++ b/net/batman-adv/types.h
74467@@ -38,8 +38,8 @@ struct hard_iface {
74468 int16_t if_num;
74469 char if_status;
74470 struct net_device *net_dev;
74471- atomic_t seqno;
74472- atomic_t frag_seqno;
74473+ atomic_unchecked_t seqno;
74474+ atomic_unchecked_t frag_seqno;
74475 unsigned char *packet_buff;
74476 int packet_len;
74477 struct kobject *hardif_obj;
74478@@ -154,7 +154,7 @@ struct bat_priv {
74479 atomic_t orig_interval; /* uint */
74480 atomic_t hop_penalty; /* uint */
74481 atomic_t log_level; /* uint */
74482- atomic_t bcast_seqno;
74483+ atomic_unchecked_t bcast_seqno;
74484 atomic_t bcast_queue_left;
74485 atomic_t batman_queue_left;
74486 atomic_t ttvn; /* translation table version number */
74487diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
74488index 07d1c1d..7e9bea9 100644
74489--- a/net/batman-adv/unicast.c
74490+++ b/net/batman-adv/unicast.c
74491@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
74492 frag1->flags = UNI_FRAG_HEAD | large_tail;
74493 frag2->flags = large_tail;
74494
74495- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
74496+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
74497 frag1->seqno = htons(seqno - 1);
74498 frag2->seqno = htons(seqno);
74499
74500diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
74501index 07bc69e..21e76b1 100644
74502--- a/net/bluetooth/hci_conn.c
74503+++ b/net/bluetooth/hci_conn.c
74504@@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
74505 memset(&cp, 0, sizeof(cp));
74506
74507 cp.handle = cpu_to_le16(conn->handle);
74508- memcpy(cp.ltk, ltk, sizeof(ltk));
74509+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74510
74511 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
74512 }
74513diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
74514index 32d338c..d24bcdb 100644
74515--- a/net/bluetooth/l2cap_core.c
74516+++ b/net/bluetooth/l2cap_core.c
74517@@ -2418,8 +2418,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
74518 break;
74519
74520 case L2CAP_CONF_RFC:
74521- if (olen == sizeof(rfc))
74522- memcpy(&rfc, (void *)val, olen);
74523+ if (olen != sizeof(rfc))
74524+ break;
74525+
74526+ memcpy(&rfc, (void *)val, olen);
74527
74528 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
74529 rfc.mode != chan->mode)
74530@@ -2537,8 +2539,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
74531
74532 switch (type) {
74533 case L2CAP_CONF_RFC:
74534- if (olen == sizeof(rfc))
74535- memcpy(&rfc, (void *)val, olen);
74536+ if (olen != sizeof(rfc))
74537+ break;
74538+
74539+ memcpy(&rfc, (void *)val, olen);
74540 goto done;
74541 }
74542 }
74543diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
74544index 5449294..7da9a5f 100644
74545--- a/net/bridge/netfilter/ebt_ulog.c
74546+++ b/net/bridge/netfilter/ebt_ulog.c
74547@@ -96,6 +96,7 @@ static void ulog_timer(unsigned long data)
74548 spin_unlock_bh(&ulog_buffers[data].lock);
74549 }
74550
74551+static struct sk_buff *ulog_alloc_skb(unsigned int size) __size_overflow(1);
74552 static struct sk_buff *ulog_alloc_skb(unsigned int size)
74553 {
74554 struct sk_buff *skb;
74555diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
74556index 5fe2ff3..10968b5 100644
74557--- a/net/bridge/netfilter/ebtables.c
74558+++ b/net/bridge/netfilter/ebtables.c
74559@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
74560 tmp.valid_hooks = t->table->valid_hooks;
74561 }
74562 mutex_unlock(&ebt_mutex);
74563- if (copy_to_user(user, &tmp, *len) != 0){
74564+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
74565 BUGPRINT("c2u Didn't work\n");
74566 ret = -EFAULT;
74567 break;
74568diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
74569index a97d97a..6f679ed 100644
74570--- a/net/caif/caif_socket.c
74571+++ b/net/caif/caif_socket.c
74572@@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
74573 #ifdef CONFIG_DEBUG_FS
74574 struct debug_fs_counter {
74575 atomic_t caif_nr_socks;
74576- atomic_t caif_sock_create;
74577- atomic_t num_connect_req;
74578- atomic_t num_connect_resp;
74579- atomic_t num_connect_fail_resp;
74580- atomic_t num_disconnect;
74581- atomic_t num_remote_shutdown_ind;
74582- atomic_t num_tx_flow_off_ind;
74583- atomic_t num_tx_flow_on_ind;
74584- atomic_t num_rx_flow_off;
74585- atomic_t num_rx_flow_on;
74586+ atomic_unchecked_t caif_sock_create;
74587+ atomic_unchecked_t num_connect_req;
74588+ atomic_unchecked_t num_connect_resp;
74589+ atomic_unchecked_t num_connect_fail_resp;
74590+ atomic_unchecked_t num_disconnect;
74591+ atomic_unchecked_t num_remote_shutdown_ind;
74592+ atomic_unchecked_t num_tx_flow_off_ind;
74593+ atomic_unchecked_t num_tx_flow_on_ind;
74594+ atomic_unchecked_t num_rx_flow_off;
74595+ atomic_unchecked_t num_rx_flow_on;
74596 };
74597 static struct debug_fs_counter cnt;
74598 #define dbfs_atomic_inc(v) atomic_inc_return(v)
74599+#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
74600 #define dbfs_atomic_dec(v) atomic_dec_return(v)
74601 #else
74602 #define dbfs_atomic_inc(v) 0
74603@@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74604 atomic_read(&cf_sk->sk.sk_rmem_alloc),
74605 sk_rcvbuf_lowwater(cf_sk));
74606 set_rx_flow_off(cf_sk);
74607- dbfs_atomic_inc(&cnt.num_rx_flow_off);
74608+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74609 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74610 }
74611
74612@@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
74613 set_rx_flow_off(cf_sk);
74614 if (net_ratelimit())
74615 pr_debug("sending flow OFF due to rmem_schedule\n");
74616- dbfs_atomic_inc(&cnt.num_rx_flow_off);
74617+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
74618 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
74619 }
74620 skb->dev = NULL;
74621@@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
74622 switch (flow) {
74623 case CAIF_CTRLCMD_FLOW_ON_IND:
74624 /* OK from modem to start sending again */
74625- dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
74626+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
74627 set_tx_flow_on(cf_sk);
74628 cf_sk->sk.sk_state_change(&cf_sk->sk);
74629 break;
74630
74631 case CAIF_CTRLCMD_FLOW_OFF_IND:
74632 /* Modem asks us to shut up */
74633- dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
74634+ dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
74635 set_tx_flow_off(cf_sk);
74636 cf_sk->sk.sk_state_change(&cf_sk->sk);
74637 break;
74638@@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74639 /* We're now connected */
74640 caif_client_register_refcnt(&cf_sk->layer,
74641 cfsk_hold, cfsk_put);
74642- dbfs_atomic_inc(&cnt.num_connect_resp);
74643+ dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
74644 cf_sk->sk.sk_state = CAIF_CONNECTED;
74645 set_tx_flow_on(cf_sk);
74646 cf_sk->sk.sk_state_change(&cf_sk->sk);
74647@@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74648
74649 case CAIF_CTRLCMD_INIT_FAIL_RSP:
74650 /* Connect request failed */
74651- dbfs_atomic_inc(&cnt.num_connect_fail_resp);
74652+ dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
74653 cf_sk->sk.sk_err = ECONNREFUSED;
74654 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
74655 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74656@@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
74657
74658 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
74659 /* Modem has closed this connection, or device is down. */
74660- dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
74661+ dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
74662 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
74663 cf_sk->sk.sk_err = ECONNRESET;
74664 set_rx_flow_on(cf_sk);
74665@@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
74666 return;
74667
74668 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
74669- dbfs_atomic_inc(&cnt.num_rx_flow_on);
74670+ dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
74671 set_rx_flow_on(cf_sk);
74672 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
74673 }
74674@@ -856,7 +857,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
74675 /*ifindex = id of the interface.*/
74676 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
74677
74678- dbfs_atomic_inc(&cnt.num_connect_req);
74679+ dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
74680 cf_sk->layer.receive = caif_sktrecv_cb;
74681
74682 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
74683@@ -945,7 +946,7 @@ static int caif_release(struct socket *sock)
74684 spin_unlock_bh(&sk->sk_receive_queue.lock);
74685 sock->sk = NULL;
74686
74687- dbfs_atomic_inc(&cnt.num_disconnect);
74688+ dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
74689
74690 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
74691 if (cf_sk->debugfs_socket_dir != NULL)
74692@@ -1124,7 +1125,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
74693 cf_sk->conn_req.protocol = protocol;
74694 /* Increase the number of sockets created. */
74695 dbfs_atomic_inc(&cnt.caif_nr_socks);
74696- num = dbfs_atomic_inc(&cnt.caif_sock_create);
74697+ num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
74698 #ifdef CONFIG_DEBUG_FS
74699 if (!IS_ERR(debugfsdir)) {
74700
74701diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
74702index 5cf5222..6f704ad 100644
74703--- a/net/caif/cfctrl.c
74704+++ b/net/caif/cfctrl.c
74705@@ -9,6 +9,7 @@
74706 #include <linux/stddef.h>
74707 #include <linux/spinlock.h>
74708 #include <linux/slab.h>
74709+#include <linux/sched.h>
74710 #include <net/caif/caif_layer.h>
74711 #include <net/caif/cfpkt.h>
74712 #include <net/caif/cfctrl.h>
74713@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
74714 memset(&dev_info, 0, sizeof(dev_info));
74715 dev_info.id = 0xff;
74716 cfsrvl_init(&this->serv, 0, &dev_info, false);
74717- atomic_set(&this->req_seq_no, 1);
74718- atomic_set(&this->rsp_seq_no, 1);
74719+ atomic_set_unchecked(&this->req_seq_no, 1);
74720+ atomic_set_unchecked(&this->rsp_seq_no, 1);
74721 this->serv.layer.receive = cfctrl_recv;
74722 sprintf(this->serv.layer.name, "ctrl");
74723 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
74724@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
74725 struct cfctrl_request_info *req)
74726 {
74727 spin_lock_bh(&ctrl->info_list_lock);
74728- atomic_inc(&ctrl->req_seq_no);
74729- req->sequence_no = atomic_read(&ctrl->req_seq_no);
74730+ atomic_inc_unchecked(&ctrl->req_seq_no);
74731+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
74732 list_add_tail(&req->list, &ctrl->list);
74733 spin_unlock_bh(&ctrl->info_list_lock);
74734 }
74735@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
74736 if (p != first)
74737 pr_warn("Requests are not received in order\n");
74738
74739- atomic_set(&ctrl->rsp_seq_no,
74740+ atomic_set_unchecked(&ctrl->rsp_seq_no,
74741 p->sequence_no);
74742 list_del(&p->list);
74743 goto out;
74744diff --git a/net/can/gw.c b/net/can/gw.c
74745index 3d79b12..8de85fa 100644
74746--- a/net/can/gw.c
74747+++ b/net/can/gw.c
74748@@ -96,7 +96,7 @@ struct cf_mod {
74749 struct {
74750 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
74751 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
74752- } csumfunc;
74753+ } __no_const csumfunc;
74754 };
74755
74756
74757diff --git a/net/compat.c b/net/compat.c
74758index 6def90e..c6992fa 100644
74759--- a/net/compat.c
74760+++ b/net/compat.c
74761@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
74762 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
74763 __get_user(kmsg->msg_flags, &umsg->msg_flags))
74764 return -EFAULT;
74765- kmsg->msg_name = compat_ptr(tmp1);
74766- kmsg->msg_iov = compat_ptr(tmp2);
74767- kmsg->msg_control = compat_ptr(tmp3);
74768+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
74769+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
74770+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
74771 return 0;
74772 }
74773
74774@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74775
74776 if (kern_msg->msg_namelen) {
74777 if (mode == VERIFY_READ) {
74778- int err = move_addr_to_kernel(kern_msg->msg_name,
74779+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
74780 kern_msg->msg_namelen,
74781 kern_address);
74782 if (err < 0)
74783@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74784 kern_msg->msg_name = NULL;
74785
74786 tot_len = iov_from_user_compat_to_kern(kern_iov,
74787- (struct compat_iovec __user *)kern_msg->msg_iov,
74788+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
74789 kern_msg->msg_iovlen);
74790 if (tot_len >= 0)
74791 kern_msg->msg_iov = kern_iov;
74792@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74793
74794 #define CMSG_COMPAT_FIRSTHDR(msg) \
74795 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
74796- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
74797+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
74798 (struct compat_cmsghdr __user *)NULL)
74799
74800 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
74801 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
74802 (ucmlen) <= (unsigned long) \
74803 ((mhdr)->msg_controllen - \
74804- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
74805+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
74806
74807 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
74808 struct compat_cmsghdr __user *cmsg, int cmsg_len)
74809 {
74810 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
74811- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
74812+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
74813 msg->msg_controllen)
74814 return NULL;
74815 return (struct compat_cmsghdr __user *)ptr;
74816@@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74817 {
74818 struct compat_timeval ctv;
74819 struct compat_timespec cts[3];
74820- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74821+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74822 struct compat_cmsghdr cmhdr;
74823 int cmlen;
74824
74825@@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74826
74827 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74828 {
74829- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74830+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74831 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74832 int fdnum = scm->fp->count;
74833 struct file **fp = scm->fp->fp;
74834@@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
74835 return -EFAULT;
74836 old_fs = get_fs();
74837 set_fs(KERNEL_DS);
74838- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74839+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74840 set_fs(old_fs);
74841
74842 return err;
74843@@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
74844 len = sizeof(ktime);
74845 old_fs = get_fs();
74846 set_fs(KERNEL_DS);
74847- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
74848+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
74849 set_fs(old_fs);
74850
74851 if (!err) {
74852@@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74853 case MCAST_JOIN_GROUP:
74854 case MCAST_LEAVE_GROUP:
74855 {
74856- struct compat_group_req __user *gr32 = (void *)optval;
74857+ struct compat_group_req __user *gr32 = (void __user *)optval;
74858 struct group_req __user *kgr =
74859 compat_alloc_user_space(sizeof(struct group_req));
74860 u32 interface;
74861@@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74862 case MCAST_BLOCK_SOURCE:
74863 case MCAST_UNBLOCK_SOURCE:
74864 {
74865- struct compat_group_source_req __user *gsr32 = (void *)optval;
74866+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74867 struct group_source_req __user *kgsr = compat_alloc_user_space(
74868 sizeof(struct group_source_req));
74869 u32 interface;
74870@@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74871 }
74872 case MCAST_MSFILTER:
74873 {
74874- struct compat_group_filter __user *gf32 = (void *)optval;
74875+ struct compat_group_filter __user *gf32 = (void __user *)optval;
74876 struct group_filter __user *kgf;
74877 u32 interface, fmode, numsrc;
74878
74879@@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
74880 char __user *optval, int __user *optlen,
74881 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74882 {
74883- struct compat_group_filter __user *gf32 = (void *)optval;
74884+ struct compat_group_filter __user *gf32 = (void __user *)optval;
74885 struct group_filter __user *kgf;
74886 int __user *koptlen;
74887 u32 interface, fmode, numsrc;
74888diff --git a/net/core/datagram.c b/net/core/datagram.c
74889index 68bbf9f..5ef0d12 100644
74890--- a/net/core/datagram.c
74891+++ b/net/core/datagram.c
74892@@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
74893 }
74894
74895 kfree_skb(skb);
74896- atomic_inc(&sk->sk_drops);
74897+ atomic_inc_unchecked(&sk->sk_drops);
74898 sk_mem_reclaim_partial(sk);
74899
74900 return err;
74901diff --git a/net/core/dev.c b/net/core/dev.c
74902index a4bf943..9c83051 100644
74903--- a/net/core/dev.c
74904+++ b/net/core/dev.c
74905@@ -1138,10 +1138,14 @@ void dev_load(struct net *net, const char *name)
74906 if (no_module && capable(CAP_NET_ADMIN))
74907 no_module = request_module("netdev-%s", name);
74908 if (no_module && capable(CAP_SYS_MODULE)) {
74909+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74910+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
74911+#else
74912 if (!request_module("%s", name))
74913 pr_err("Loading kernel module for a network device "
74914 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
74915 "instead\n", name);
74916+#endif
74917 }
74918 }
74919 EXPORT_SYMBOL(dev_load);
74920@@ -1585,7 +1589,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74921 {
74922 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
74923 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
74924- atomic_long_inc(&dev->rx_dropped);
74925+ atomic_long_inc_unchecked(&dev->rx_dropped);
74926 kfree_skb(skb);
74927 return NET_RX_DROP;
74928 }
74929@@ -1595,7 +1599,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74930 nf_reset(skb);
74931
74932 if (unlikely(!is_skb_forwardable(dev, skb))) {
74933- atomic_long_inc(&dev->rx_dropped);
74934+ atomic_long_inc_unchecked(&dev->rx_dropped);
74935 kfree_skb(skb);
74936 return NET_RX_DROP;
74937 }
74938@@ -2057,7 +2061,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
74939
74940 struct dev_gso_cb {
74941 void (*destructor)(struct sk_buff *skb);
74942-};
74943+} __no_const;
74944
74945 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
74946
74947@@ -2913,7 +2917,7 @@ enqueue:
74948
74949 local_irq_restore(flags);
74950
74951- atomic_long_inc(&skb->dev->rx_dropped);
74952+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74953 kfree_skb(skb);
74954 return NET_RX_DROP;
74955 }
74956@@ -2985,7 +2989,7 @@ int netif_rx_ni(struct sk_buff *skb)
74957 }
74958 EXPORT_SYMBOL(netif_rx_ni);
74959
74960-static void net_tx_action(struct softirq_action *h)
74961+static void net_tx_action(void)
74962 {
74963 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74964
74965@@ -3273,7 +3277,7 @@ ncls:
74966 if (pt_prev) {
74967 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
74968 } else {
74969- atomic_long_inc(&skb->dev->rx_dropped);
74970+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74971 kfree_skb(skb);
74972 /* Jamal, now you will not able to escape explaining
74973 * me how you were going to use this. :-)
74974@@ -3833,7 +3837,7 @@ void netif_napi_del(struct napi_struct *napi)
74975 }
74976 EXPORT_SYMBOL(netif_napi_del);
74977
74978-static void net_rx_action(struct softirq_action *h)
74979+static void net_rx_action(void)
74980 {
74981 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74982 unsigned long time_limit = jiffies + 2;
74983@@ -5890,7 +5894,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
74984 } else {
74985 netdev_stats_to_stats64(storage, &dev->stats);
74986 }
74987- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
74988+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
74989 return storage;
74990 }
74991 EXPORT_SYMBOL(dev_get_stats);
74992diff --git a/net/core/flow.c b/net/core/flow.c
74993index e318c7e..168b1d0 100644
74994--- a/net/core/flow.c
74995+++ b/net/core/flow.c
74996@@ -61,7 +61,7 @@ struct flow_cache {
74997 struct timer_list rnd_timer;
74998 };
74999
75000-atomic_t flow_cache_genid = ATOMIC_INIT(0);
75001+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
75002 EXPORT_SYMBOL(flow_cache_genid);
75003 static struct flow_cache flow_cache_global;
75004 static struct kmem_cache *flow_cachep __read_mostly;
75005@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
75006
75007 static int flow_entry_valid(struct flow_cache_entry *fle)
75008 {
75009- if (atomic_read(&flow_cache_genid) != fle->genid)
75010+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
75011 return 0;
75012 if (fle->object && !fle->object->ops->check(fle->object))
75013 return 0;
75014@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
75015 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
75016 fcp->hash_count++;
75017 }
75018- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
75019+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
75020 flo = fle->object;
75021 if (!flo)
75022 goto ret_object;
75023@@ -280,7 +280,7 @@ nocache:
75024 }
75025 flo = resolver(net, key, family, dir, flo, ctx);
75026 if (fle) {
75027- fle->genid = atomic_read(&flow_cache_genid);
75028+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
75029 if (!IS_ERR(flo))
75030 fle->object = flo;
75031 else
75032diff --git a/net/core/iovec.c b/net/core/iovec.c
75033index c40f27e..7f49254 100644
75034--- a/net/core/iovec.c
75035+++ b/net/core/iovec.c
75036@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
75037 if (m->msg_namelen) {
75038 if (mode == VERIFY_READ) {
75039 void __user *namep;
75040- namep = (void __user __force *) m->msg_name;
75041+ namep = (void __force_user *) m->msg_name;
75042 err = move_addr_to_kernel(namep, m->msg_namelen,
75043 address);
75044 if (err < 0)
75045@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
75046 }
75047
75048 size = m->msg_iovlen * sizeof(struct iovec);
75049- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
75050+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
75051 return -EFAULT;
75052
75053 m->msg_iov = iov;
75054diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
75055index 5c30296..ebe7b61 100644
75056--- a/net/core/rtnetlink.c
75057+++ b/net/core/rtnetlink.c
75058@@ -57,7 +57,7 @@ struct rtnl_link {
75059 rtnl_doit_func doit;
75060 rtnl_dumpit_func dumpit;
75061 rtnl_calcit_func calcit;
75062-};
75063+} __no_const;
75064
75065 static DEFINE_MUTEX(rtnl_mutex);
75066
75067diff --git a/net/core/scm.c b/net/core/scm.c
75068index ff52ad0..aff1c0f 100644
75069--- a/net/core/scm.c
75070+++ b/net/core/scm.c
75071@@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
75072 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75073 {
75074 struct cmsghdr __user *cm
75075- = (__force struct cmsghdr __user *)msg->msg_control;
75076+ = (struct cmsghdr __force_user *)msg->msg_control;
75077 struct cmsghdr cmhdr;
75078 int cmlen = CMSG_LEN(len);
75079 int err;
75080@@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75081 err = -EFAULT;
75082 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
75083 goto out;
75084- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
75085+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
75086 goto out;
75087 cmlen = CMSG_SPACE(len);
75088 if (msg->msg_controllen < cmlen)
75089@@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
75090 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75091 {
75092 struct cmsghdr __user *cm
75093- = (__force struct cmsghdr __user*)msg->msg_control;
75094+ = (struct cmsghdr __force_user *)msg->msg_control;
75095
75096 int fdmax = 0;
75097 int fdnum = scm->fp->count;
75098@@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75099 if (fdnum < fdmax)
75100 fdmax = fdnum;
75101
75102- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
75103+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
75104 i++, cmfptr++)
75105 {
75106 int new_fd;
75107diff --git a/net/core/skbuff.c b/net/core/skbuff.c
75108index da0c97f..8253632 100644
75109--- a/net/core/skbuff.c
75110+++ b/net/core/skbuff.c
75111@@ -3160,6 +3160,8 @@ static void sock_rmem_free(struct sk_buff *skb)
75112 */
75113 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
75114 {
75115+ int len = skb->len;
75116+
75117 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
75118 (unsigned)sk->sk_rcvbuf)
75119 return -ENOMEM;
75120@@ -3174,7 +3176,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
75121
75122 skb_queue_tail(&sk->sk_error_queue, skb);
75123 if (!sock_flag(sk, SOCK_DEAD))
75124- sk->sk_data_ready(sk, skb->len);
75125+ sk->sk_data_ready(sk, len);
75126 return 0;
75127 }
75128 EXPORT_SYMBOL(sock_queue_err_skb);
75129diff --git a/net/core/sock.c b/net/core/sock.c
75130index 02f8dfe..86dfd4a 100644
75131--- a/net/core/sock.c
75132+++ b/net/core/sock.c
75133@@ -341,7 +341,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75134 struct sk_buff_head *list = &sk->sk_receive_queue;
75135
75136 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
75137- atomic_inc(&sk->sk_drops);
75138+ atomic_inc_unchecked(&sk->sk_drops);
75139 trace_sock_rcvqueue_full(sk, skb);
75140 return -ENOMEM;
75141 }
75142@@ -351,7 +351,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75143 return err;
75144
75145 if (!sk_rmem_schedule(sk, skb->truesize)) {
75146- atomic_inc(&sk->sk_drops);
75147+ atomic_inc_unchecked(&sk->sk_drops);
75148 return -ENOBUFS;
75149 }
75150
75151@@ -371,7 +371,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75152 skb_dst_force(skb);
75153
75154 spin_lock_irqsave(&list->lock, flags);
75155- skb->dropcount = atomic_read(&sk->sk_drops);
75156+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
75157 __skb_queue_tail(list, skb);
75158 spin_unlock_irqrestore(&list->lock, flags);
75159
75160@@ -391,7 +391,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75161 skb->dev = NULL;
75162
75163 if (sk_rcvqueues_full(sk, skb)) {
75164- atomic_inc(&sk->sk_drops);
75165+ atomic_inc_unchecked(&sk->sk_drops);
75166 goto discard_and_relse;
75167 }
75168 if (nested)
75169@@ -409,7 +409,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75170 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
75171 } else if (sk_add_backlog(sk, skb)) {
75172 bh_unlock_sock(sk);
75173- atomic_inc(&sk->sk_drops);
75174+ atomic_inc_unchecked(&sk->sk_drops);
75175 goto discard_and_relse;
75176 }
75177
75178@@ -974,7 +974,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75179 if (len > sizeof(peercred))
75180 len = sizeof(peercred);
75181 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
75182- if (copy_to_user(optval, &peercred, len))
75183+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
75184 return -EFAULT;
75185 goto lenout;
75186 }
75187@@ -987,7 +987,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75188 return -ENOTCONN;
75189 if (lv < len)
75190 return -EINVAL;
75191- if (copy_to_user(optval, address, len))
75192+ if (len > sizeof(address) || copy_to_user(optval, address, len))
75193 return -EFAULT;
75194 goto lenout;
75195 }
75196@@ -1024,7 +1024,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75197
75198 if (len > lv)
75199 len = lv;
75200- if (copy_to_user(optval, &v, len))
75201+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
75202 return -EFAULT;
75203 lenout:
75204 if (put_user(len, optlen))
75205@@ -2108,7 +2108,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
75206 */
75207 smp_wmb();
75208 atomic_set(&sk->sk_refcnt, 1);
75209- atomic_set(&sk->sk_drops, 0);
75210+ atomic_set_unchecked(&sk->sk_drops, 0);
75211 }
75212 EXPORT_SYMBOL(sock_init_data);
75213
75214diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
75215index b9868e1..849f809 100644
75216--- a/net/core/sock_diag.c
75217+++ b/net/core/sock_diag.c
75218@@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
75219
75220 int sock_diag_check_cookie(void *sk, __u32 *cookie)
75221 {
75222+#ifndef CONFIG_GRKERNSEC_HIDESYM
75223 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
75224 cookie[1] != INET_DIAG_NOCOOKIE) &&
75225 ((u32)(unsigned long)sk != cookie[0] ||
75226 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
75227 return -ESTALE;
75228 else
75229+#endif
75230 return 0;
75231 }
75232 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
75233
75234 void sock_diag_save_cookie(void *sk, __u32 *cookie)
75235 {
75236+#ifdef CONFIG_GRKERNSEC_HIDESYM
75237+ cookie[0] = 0;
75238+ cookie[1] = 0;
75239+#else
75240 cookie[0] = (u32)(unsigned long)sk;
75241 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
75242+#endif
75243 }
75244 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
75245
75246diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
75247index 02e75d1..9a57a7c 100644
75248--- a/net/decnet/sysctl_net_decnet.c
75249+++ b/net/decnet/sysctl_net_decnet.c
75250@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
75251
75252 if (len > *lenp) len = *lenp;
75253
75254- if (copy_to_user(buffer, addr, len))
75255+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
75256 return -EFAULT;
75257
75258 *lenp = len;
75259@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
75260
75261 if (len > *lenp) len = *lenp;
75262
75263- if (copy_to_user(buffer, devname, len))
75264+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
75265 return -EFAULT;
75266
75267 *lenp = len;
75268diff --git a/net/econet/Kconfig b/net/econet/Kconfig
75269index 39a2d29..f39c0fe 100644
75270--- a/net/econet/Kconfig
75271+++ b/net/econet/Kconfig
75272@@ -4,7 +4,7 @@
75273
75274 config ECONET
75275 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
75276- depends on EXPERIMENTAL && INET
75277+ depends on EXPERIMENTAL && INET && BROKEN
75278 ---help---
75279 Econet is a fairly old and slow networking protocol mainly used by
75280 Acorn computers to access file and print servers. It uses native
75281diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
75282index 36d1440..44ff28b 100644
75283--- a/net/ipv4/ah4.c
75284+++ b/net/ipv4/ah4.c
75285@@ -19,6 +19,8 @@ struct ah_skb_cb {
75286 #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
75287
75288 static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
75289+ unsigned int size) __size_overflow(3);
75290+static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
75291 unsigned int size)
75292 {
75293 unsigned int len;
75294diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
75295index 92fc5f6..b790d91 100644
75296--- a/net/ipv4/fib_frontend.c
75297+++ b/net/ipv4/fib_frontend.c
75298@@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
75299 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75300 fib_sync_up(dev);
75301 #endif
75302- atomic_inc(&net->ipv4.dev_addr_genid);
75303+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75304 rt_cache_flush(dev_net(dev), -1);
75305 break;
75306 case NETDEV_DOWN:
75307 fib_del_ifaddr(ifa, NULL);
75308- atomic_inc(&net->ipv4.dev_addr_genid);
75309+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75310 if (ifa->ifa_dev->ifa_list == NULL) {
75311 /* Last address was deleted from this interface.
75312 * Disable IP.
75313@@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
75314 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75315 fib_sync_up(dev);
75316 #endif
75317- atomic_inc(&net->ipv4.dev_addr_genid);
75318+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75319 rt_cache_flush(dev_net(dev), -1);
75320 break;
75321 case NETDEV_DOWN:
75322diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
75323index 80106d8..232e898 100644
75324--- a/net/ipv4/fib_semantics.c
75325+++ b/net/ipv4/fib_semantics.c
75326@@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
75327 nh->nh_saddr = inet_select_addr(nh->nh_dev,
75328 nh->nh_gw,
75329 nh->nh_parent->fib_scope);
75330- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
75331+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
75332
75333 return nh->nh_saddr;
75334 }
75335diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
75336index 984ec65..97ac518 100644
75337--- a/net/ipv4/inet_hashtables.c
75338+++ b/net/ipv4/inet_hashtables.c
75339@@ -18,12 +18,15 @@
75340 #include <linux/sched.h>
75341 #include <linux/slab.h>
75342 #include <linux/wait.h>
75343+#include <linux/security.h>
75344
75345 #include <net/inet_connection_sock.h>
75346 #include <net/inet_hashtables.h>
75347 #include <net/secure_seq.h>
75348 #include <net/ip.h>
75349
75350+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
75351+
75352 /*
75353 * Allocate and initialize a new local port bind bucket.
75354 * The bindhash mutex for snum's hash chain must be held here.
75355@@ -530,6 +533,8 @@ ok:
75356 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
75357 spin_unlock(&head->lock);
75358
75359+ gr_update_task_in_ip_table(current, inet_sk(sk));
75360+
75361 if (tw) {
75362 inet_twsk_deschedule(tw, death_row);
75363 while (twrefcnt) {
75364diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
75365index d4d61b6..b81aec8 100644
75366--- a/net/ipv4/inetpeer.c
75367+++ b/net/ipv4/inetpeer.c
75368@@ -487,8 +487,8 @@ relookup:
75369 if (p) {
75370 p->daddr = *daddr;
75371 atomic_set(&p->refcnt, 1);
75372- atomic_set(&p->rid, 0);
75373- atomic_set(&p->ip_id_count,
75374+ atomic_set_unchecked(&p->rid, 0);
75375+ atomic_set_unchecked(&p->ip_id_count,
75376 (daddr->family == AF_INET) ?
75377 secure_ip_id(daddr->addr.a4) :
75378 secure_ipv6_id(daddr->addr.a6));
75379diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
75380index 1f23a57..7180dfe 100644
75381--- a/net/ipv4/ip_fragment.c
75382+++ b/net/ipv4/ip_fragment.c
75383@@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
75384 return 0;
75385
75386 start = qp->rid;
75387- end = atomic_inc_return(&peer->rid);
75388+ end = atomic_inc_return_unchecked(&peer->rid);
75389 qp->rid = end;
75390
75391 rc = qp->q.fragments && (end - start) > max;
75392diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
75393index 8aa87c1..35c3248 100644
75394--- a/net/ipv4/ip_sockglue.c
75395+++ b/net/ipv4/ip_sockglue.c
75396@@ -1112,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75397 len = min_t(unsigned int, len, opt->optlen);
75398 if (put_user(len, optlen))
75399 return -EFAULT;
75400- if (copy_to_user(optval, opt->__data, len))
75401+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
75402+ copy_to_user(optval, opt->__data, len))
75403 return -EFAULT;
75404 return 0;
75405 }
75406@@ -1240,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75407 if (sk->sk_type != SOCK_STREAM)
75408 return -ENOPROTOOPT;
75409
75410- msg.msg_control = optval;
75411+ msg.msg_control = (void __force_kernel *)optval;
75412 msg.msg_controllen = len;
75413 msg.msg_flags = flags;
75414
75415diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
75416index 6e412a6..6640538 100644
75417--- a/net/ipv4/ipconfig.c
75418+++ b/net/ipv4/ipconfig.c
75419@@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
75420
75421 mm_segment_t oldfs = get_fs();
75422 set_fs(get_ds());
75423- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75424+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75425 set_fs(oldfs);
75426 return res;
75427 }
75428@@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
75429
75430 mm_segment_t oldfs = get_fs();
75431 set_fs(get_ds());
75432- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75433+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75434 set_fs(oldfs);
75435 return res;
75436 }
75437@@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
75438
75439 mm_segment_t oldfs = get_fs();
75440 set_fs(get_ds());
75441- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
75442+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
75443 set_fs(oldfs);
75444 return res;
75445 }
75446diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
75447index fd7a3f6..a1b1013 100644
75448--- a/net/ipv4/netfilter/arp_tables.c
75449+++ b/net/ipv4/netfilter/arp_tables.c
75450@@ -757,6 +757,9 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
75451
75452 static int copy_entries_to_user(unsigned int total_size,
75453 const struct xt_table *table,
75454+ void __user *userptr) __size_overflow(1);
75455+static int copy_entries_to_user(unsigned int total_size,
75456+ const struct xt_table *table,
75457 void __user *userptr)
75458 {
75459 unsigned int off, num;
75460@@ -984,6 +987,11 @@ static int __do_replace(struct net *net, const char *name,
75461 unsigned int valid_hooks,
75462 struct xt_table_info *newinfo,
75463 unsigned int num_counters,
75464+ void __user *counters_ptr) __size_overflow(5);
75465+static int __do_replace(struct net *net, const char *name,
75466+ unsigned int valid_hooks,
75467+ struct xt_table_info *newinfo,
75468+ unsigned int num_counters,
75469 void __user *counters_ptr)
75470 {
75471 int ret;
75472@@ -1104,6 +1112,8 @@ static int do_replace(struct net *net, const void __user *user,
75473 }
75474
75475 static int do_add_counters(struct net *net, const void __user *user,
75476+ unsigned int len, int compat) __size_overflow(3);
75477+static int do_add_counters(struct net *net, const void __user *user,
75478 unsigned int len, int compat)
75479 {
75480 unsigned int i, curcpu;
75481diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
75482index 24e556e..b073356 100644
75483--- a/net/ipv4/netfilter/ip_tables.c
75484+++ b/net/ipv4/netfilter/ip_tables.c
75485@@ -923,6 +923,10 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
75486 static int
75487 copy_entries_to_user(unsigned int total_size,
75488 const struct xt_table *table,
75489+ void __user *userptr) __size_overflow(1);
75490+static int
75491+copy_entries_to_user(unsigned int total_size,
75492+ const struct xt_table *table,
75493 void __user *userptr)
75494 {
75495 unsigned int off, num;
75496@@ -1172,6 +1176,10 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
75497 static int
75498 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
75499 struct xt_table_info *newinfo, unsigned int num_counters,
75500+ void __user *counters_ptr) __size_overflow(5);
75501+static int
75502+__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
75503+ struct xt_table_info *newinfo, unsigned int num_counters,
75504 void __user *counters_ptr)
75505 {
75506 int ret;
75507@@ -1293,6 +1301,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
75508
75509 static int
75510 do_add_counters(struct net *net, const void __user *user,
75511+ unsigned int len, int compat) __size_overflow(3);
75512+static int
75513+do_add_counters(struct net *net, const void __user *user,
75514 unsigned int len, int compat)
75515 {
75516 unsigned int i, curcpu;
75517diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
75518index ba5756d..8d34d74 100644
75519--- a/net/ipv4/netfilter/ipt_ULOG.c
75520+++ b/net/ipv4/netfilter/ipt_ULOG.c
75521@@ -125,6 +125,7 @@ static void ulog_timer(unsigned long data)
75522 spin_unlock_bh(&ulog_lock);
75523 }
75524
75525+static struct sk_buff *ulog_alloc_skb(unsigned int size) __size_overflow(1);
75526 static struct sk_buff *ulog_alloc_skb(unsigned int size)
75527 {
75528 struct sk_buff *skb;
75529diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
75530index 2133c30..0e8047e 100644
75531--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
75532+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
75533@@ -435,6 +435,10 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
75534 static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
75535 unsigned char *eoc,
75536 unsigned long **oid,
75537+ unsigned int *len) __size_overflow(2);
75538+static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
75539+ unsigned char *eoc,
75540+ unsigned long **oid,
75541 unsigned int *len)
75542 {
75543 unsigned long subid;
75544diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
75545index b072386..abdebcf 100644
75546--- a/net/ipv4/ping.c
75547+++ b/net/ipv4/ping.c
75548@@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
75549 sk_rmem_alloc_get(sp),
75550 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75551 atomic_read(&sp->sk_refcnt), sp,
75552- atomic_read(&sp->sk_drops), len);
75553+ atomic_read_unchecked(&sp->sk_drops), len);
75554 }
75555
75556 static int ping_seq_show(struct seq_file *seq, void *v)
75557diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
75558index 3ccda5a..3c1e61d 100644
75559--- a/net/ipv4/raw.c
75560+++ b/net/ipv4/raw.c
75561@@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
75562 int raw_rcv(struct sock *sk, struct sk_buff *skb)
75563 {
75564 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
75565- atomic_inc(&sk->sk_drops);
75566+ atomic_inc_unchecked(&sk->sk_drops);
75567 kfree_skb(skb);
75568 return NET_RX_DROP;
75569 }
75570@@ -742,16 +742,20 @@ static int raw_init(struct sock *sk)
75571
75572 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
75573 {
75574+ struct icmp_filter filter;
75575+
75576 if (optlen > sizeof(struct icmp_filter))
75577 optlen = sizeof(struct icmp_filter);
75578- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
75579+ if (copy_from_user(&filter, optval, optlen))
75580 return -EFAULT;
75581+ raw_sk(sk)->filter = filter;
75582 return 0;
75583 }
75584
75585 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
75586 {
75587 int len, ret = -EFAULT;
75588+ struct icmp_filter filter;
75589
75590 if (get_user(len, optlen))
75591 goto out;
75592@@ -761,8 +765,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
75593 if (len > sizeof(struct icmp_filter))
75594 len = sizeof(struct icmp_filter);
75595 ret = -EFAULT;
75596- if (put_user(len, optlen) ||
75597- copy_to_user(optval, &raw_sk(sk)->filter, len))
75598+ filter = raw_sk(sk)->filter;
75599+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
75600 goto out;
75601 ret = 0;
75602 out: return ret;
75603@@ -990,7 +994,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75604 sk_wmem_alloc_get(sp),
75605 sk_rmem_alloc_get(sp),
75606 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75607- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75608+ atomic_read(&sp->sk_refcnt),
75609+#ifdef CONFIG_GRKERNSEC_HIDESYM
75610+ NULL,
75611+#else
75612+ sp,
75613+#endif
75614+ atomic_read_unchecked(&sp->sk_drops));
75615 }
75616
75617 static int raw_seq_show(struct seq_file *seq, void *v)
75618diff --git a/net/ipv4/route.c b/net/ipv4/route.c
75619index 0197747..7adb0dc 100644
75620--- a/net/ipv4/route.c
75621+++ b/net/ipv4/route.c
75622@@ -311,7 +311,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
75623
75624 static inline int rt_genid(struct net *net)
75625 {
75626- return atomic_read(&net->ipv4.rt_genid);
75627+ return atomic_read_unchecked(&net->ipv4.rt_genid);
75628 }
75629
75630 #ifdef CONFIG_PROC_FS
75631@@ -935,7 +935,7 @@ static void rt_cache_invalidate(struct net *net)
75632 unsigned char shuffle;
75633
75634 get_random_bytes(&shuffle, sizeof(shuffle));
75635- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
75636+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
75637 inetpeer_invalidate_tree(AF_INET);
75638 }
75639
75640@@ -3010,7 +3010,7 @@ static int rt_fill_info(struct net *net,
75641 error = rt->dst.error;
75642 if (peer) {
75643 inet_peer_refcheck(rt->peer);
75644- id = atomic_read(&peer->ip_id_count) & 0xffff;
75645+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
75646 if (peer->tcp_ts_stamp) {
75647 ts = peer->tcp_ts;
75648 tsage = get_seconds() - peer->tcp_ts_stamp;
75649diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
75650index fd54c5f..96d6407 100644
75651--- a/net/ipv4/tcp_ipv4.c
75652+++ b/net/ipv4/tcp_ipv4.c
75653@@ -88,6 +88,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
75654 int sysctl_tcp_low_latency __read_mostly;
75655 EXPORT_SYMBOL(sysctl_tcp_low_latency);
75656
75657+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75658+extern int grsec_enable_blackhole;
75659+#endif
75660
75661 #ifdef CONFIG_TCP_MD5SIG
75662 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
75663@@ -1638,6 +1641,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
75664 return 0;
75665
75666 reset:
75667+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75668+ if (!grsec_enable_blackhole)
75669+#endif
75670 tcp_v4_send_reset(rsk, skb);
75671 discard:
75672 kfree_skb(skb);
75673@@ -1700,12 +1706,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
75674 TCP_SKB_CB(skb)->sacked = 0;
75675
75676 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75677- if (!sk)
75678+ if (!sk) {
75679+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75680+ ret = 1;
75681+#endif
75682 goto no_tcp_socket;
75683-
75684+ }
75685 process:
75686- if (sk->sk_state == TCP_TIME_WAIT)
75687+ if (sk->sk_state == TCP_TIME_WAIT) {
75688+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75689+ ret = 2;
75690+#endif
75691 goto do_time_wait;
75692+ }
75693
75694 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
75695 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75696@@ -1755,6 +1768,10 @@ no_tcp_socket:
75697 bad_packet:
75698 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75699 } else {
75700+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75701+ if (!grsec_enable_blackhole || (ret == 1 &&
75702+ (skb->dev->flags & IFF_LOOPBACK)))
75703+#endif
75704 tcp_v4_send_reset(NULL, skb);
75705 }
75706
75707@@ -2417,7 +2434,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
75708 0, /* non standard timer */
75709 0, /* open_requests have no inode */
75710 atomic_read(&sk->sk_refcnt),
75711+#ifdef CONFIG_GRKERNSEC_HIDESYM
75712+ NULL,
75713+#else
75714 req,
75715+#endif
75716 len);
75717 }
75718
75719@@ -2467,7 +2488,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
75720 sock_i_uid(sk),
75721 icsk->icsk_probes_out,
75722 sock_i_ino(sk),
75723- atomic_read(&sk->sk_refcnt), sk,
75724+ atomic_read(&sk->sk_refcnt),
75725+#ifdef CONFIG_GRKERNSEC_HIDESYM
75726+ NULL,
75727+#else
75728+ sk,
75729+#endif
75730 jiffies_to_clock_t(icsk->icsk_rto),
75731 jiffies_to_clock_t(icsk->icsk_ack.ato),
75732 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
75733@@ -2495,7 +2521,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
75734 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
75735 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
75736 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75737- atomic_read(&tw->tw_refcnt), tw, len);
75738+ atomic_read(&tw->tw_refcnt),
75739+#ifdef CONFIG_GRKERNSEC_HIDESYM
75740+ NULL,
75741+#else
75742+ tw,
75743+#endif
75744+ len);
75745 }
75746
75747 #define TMPSZ 150
75748diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
75749index 550e755..25721b3 100644
75750--- a/net/ipv4/tcp_minisocks.c
75751+++ b/net/ipv4/tcp_minisocks.c
75752@@ -27,6 +27,10 @@
75753 #include <net/inet_common.h>
75754 #include <net/xfrm.h>
75755
75756+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75757+extern int grsec_enable_blackhole;
75758+#endif
75759+
75760 int sysctl_tcp_syncookies __read_mostly = 1;
75761 EXPORT_SYMBOL(sysctl_tcp_syncookies);
75762
75763@@ -753,6 +757,10 @@ listen_overflow:
75764
75765 embryonic_reset:
75766 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
75767+
75768+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75769+ if (!grsec_enable_blackhole)
75770+#endif
75771 if (!(flg & TCP_FLAG_RST))
75772 req->rsk_ops->send_reset(sk, skb);
75773
75774diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
75775index 85ee7eb..53277ab 100644
75776--- a/net/ipv4/tcp_probe.c
75777+++ b/net/ipv4/tcp_probe.c
75778@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
75779 if (cnt + width >= len)
75780 break;
75781
75782- if (copy_to_user(buf + cnt, tbuf, width))
75783+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
75784 return -EFAULT;
75785 cnt += width;
75786 }
75787diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
75788index cd2e072..1fffee2 100644
75789--- a/net/ipv4/tcp_timer.c
75790+++ b/net/ipv4/tcp_timer.c
75791@@ -22,6 +22,10 @@
75792 #include <linux/gfp.h>
75793 #include <net/tcp.h>
75794
75795+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75796+extern int grsec_lastack_retries;
75797+#endif
75798+
75799 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
75800 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
75801 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
75802@@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
75803 }
75804 }
75805
75806+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75807+ if ((sk->sk_state == TCP_LAST_ACK) &&
75808+ (grsec_lastack_retries > 0) &&
75809+ (grsec_lastack_retries < retry_until))
75810+ retry_until = grsec_lastack_retries;
75811+#endif
75812+
75813 if (retransmits_timed_out(sk, retry_until,
75814 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
75815 /* Has it gone just too far? */
75816diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
75817index 5d075b5..d907d5f 100644
75818--- a/net/ipv4/udp.c
75819+++ b/net/ipv4/udp.c
75820@@ -86,6 +86,7 @@
75821 #include <linux/types.h>
75822 #include <linux/fcntl.h>
75823 #include <linux/module.h>
75824+#include <linux/security.h>
75825 #include <linux/socket.h>
75826 #include <linux/sockios.h>
75827 #include <linux/igmp.h>
75828@@ -108,6 +109,10 @@
75829 #include <trace/events/udp.h>
75830 #include "udp_impl.h"
75831
75832+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75833+extern int grsec_enable_blackhole;
75834+#endif
75835+
75836 struct udp_table udp_table __read_mostly;
75837 EXPORT_SYMBOL(udp_table);
75838
75839@@ -566,6 +571,9 @@ found:
75840 return s;
75841 }
75842
75843+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
75844+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
75845+
75846 /*
75847 * This routine is called by the ICMP module when it gets some
75848 * sort of error condition. If err < 0 then the socket should
75849@@ -857,9 +865,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
75850 dport = usin->sin_port;
75851 if (dport == 0)
75852 return -EINVAL;
75853+
75854+ err = gr_search_udp_sendmsg(sk, usin);
75855+ if (err)
75856+ return err;
75857 } else {
75858 if (sk->sk_state != TCP_ESTABLISHED)
75859 return -EDESTADDRREQ;
75860+
75861+ err = gr_search_udp_sendmsg(sk, NULL);
75862+ if (err)
75863+ return err;
75864+
75865 daddr = inet->inet_daddr;
75866 dport = inet->inet_dport;
75867 /* Open fast path for connected socket.
75868@@ -1100,7 +1117,7 @@ static unsigned int first_packet_length(struct sock *sk)
75869 udp_lib_checksum_complete(skb)) {
75870 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75871 IS_UDPLITE(sk));
75872- atomic_inc(&sk->sk_drops);
75873+ atomic_inc_unchecked(&sk->sk_drops);
75874 __skb_unlink(skb, rcvq);
75875 __skb_queue_tail(&list_kill, skb);
75876 }
75877@@ -1186,6 +1203,10 @@ try_again:
75878 if (!skb)
75879 goto out;
75880
75881+ err = gr_search_udp_recvmsg(sk, skb);
75882+ if (err)
75883+ goto out_free;
75884+
75885 ulen = skb->len - sizeof(struct udphdr);
75886 copied = len;
75887 if (copied > ulen)
75888@@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75889
75890 drop:
75891 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75892- atomic_inc(&sk->sk_drops);
75893+ atomic_inc_unchecked(&sk->sk_drops);
75894 kfree_skb(skb);
75895 return -1;
75896 }
75897@@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75898 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
75899
75900 if (!skb1) {
75901- atomic_inc(&sk->sk_drops);
75902+ atomic_inc_unchecked(&sk->sk_drops);
75903 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
75904 IS_UDPLITE(sk));
75905 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75906@@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75907 goto csum_error;
75908
75909 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
75910+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75911+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75912+#endif
75913 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
75914
75915 /*
75916@@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
75917 sk_wmem_alloc_get(sp),
75918 sk_rmem_alloc_get(sp),
75919 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75920- atomic_read(&sp->sk_refcnt), sp,
75921- atomic_read(&sp->sk_drops), len);
75922+ atomic_read(&sp->sk_refcnt),
75923+#ifdef CONFIG_GRKERNSEC_HIDESYM
75924+ NULL,
75925+#else
75926+ sp,
75927+#endif
75928+ atomic_read_unchecked(&sp->sk_drops), len);
75929 }
75930
75931 int udp4_seq_show(struct seq_file *seq, void *v)
75932diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
75933index 6b8ebc5..1d624f4 100644
75934--- a/net/ipv6/addrconf.c
75935+++ b/net/ipv6/addrconf.c
75936@@ -2145,7 +2145,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
75937 p.iph.ihl = 5;
75938 p.iph.protocol = IPPROTO_IPV6;
75939 p.iph.ttl = 64;
75940- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
75941+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
75942
75943 if (ops->ndo_do_ioctl) {
75944 mm_segment_t oldfs = get_fs();
75945diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
75946index 2ae79db..8f101bf 100644
75947--- a/net/ipv6/ah6.c
75948+++ b/net/ipv6/ah6.c
75949@@ -56,6 +56,8 @@ struct ah_skb_cb {
75950 #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
75951
75952 static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
75953+ unsigned int size) __size_overflow(3);
75954+static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
75955 unsigned int size)
75956 {
75957 unsigned int len;
75958diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
75959index 02dd203..e03fcc9 100644
75960--- a/net/ipv6/inet6_connection_sock.c
75961+++ b/net/ipv6/inet6_connection_sock.c
75962@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
75963 #ifdef CONFIG_XFRM
75964 {
75965 struct rt6_info *rt = (struct rt6_info *)dst;
75966- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
75967+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
75968 }
75969 #endif
75970 }
75971@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
75972 #ifdef CONFIG_XFRM
75973 if (dst) {
75974 struct rt6_info *rt = (struct rt6_info *)dst;
75975- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
75976+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
75977 __sk_dst_reset(sk);
75978 dst = NULL;
75979 }
75980diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
75981index 18a2719..779f36a 100644
75982--- a/net/ipv6/ipv6_sockglue.c
75983+++ b/net/ipv6/ipv6_sockglue.c
75984@@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75985 if (sk->sk_type != SOCK_STREAM)
75986 return -ENOPROTOOPT;
75987
75988- msg.msg_control = optval;
75989+ msg.msg_control = (void __force_kernel *)optval;
75990 msg.msg_controllen = len;
75991 msg.msg_flags = flags;
75992
75993diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
75994index 94874b0..108a94d 100644
75995--- a/net/ipv6/netfilter/ip6_tables.c
75996+++ b/net/ipv6/netfilter/ip6_tables.c
75997@@ -945,6 +945,10 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
75998 static int
75999 copy_entries_to_user(unsigned int total_size,
76000 const struct xt_table *table,
76001+ void __user *userptr) __size_overflow(1);
76002+static int
76003+copy_entries_to_user(unsigned int total_size,
76004+ const struct xt_table *table,
76005 void __user *userptr)
76006 {
76007 unsigned int off, num;
76008@@ -1194,6 +1198,10 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
76009 static int
76010 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
76011 struct xt_table_info *newinfo, unsigned int num_counters,
76012+ void __user *counters_ptr) __size_overflow(5);
76013+static int
76014+__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
76015+ struct xt_table_info *newinfo, unsigned int num_counters,
76016 void __user *counters_ptr)
76017 {
76018 int ret;
76019@@ -1315,6 +1323,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
76020
76021 static int
76022 do_add_counters(struct net *net, const void __user *user, unsigned int len,
76023+ int compat) __size_overflow(3);
76024+static int
76025+do_add_counters(struct net *net, const void __user *user, unsigned int len,
76026 int compat)
76027 {
76028 unsigned int i, curcpu;
76029diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
76030index d02f7e4..2d2a0f1 100644
76031--- a/net/ipv6/raw.c
76032+++ b/net/ipv6/raw.c
76033@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
76034 {
76035 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
76036 skb_checksum_complete(skb)) {
76037- atomic_inc(&sk->sk_drops);
76038+ atomic_inc_unchecked(&sk->sk_drops);
76039 kfree_skb(skb);
76040 return NET_RX_DROP;
76041 }
76042@@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
76043 struct raw6_sock *rp = raw6_sk(sk);
76044
76045 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
76046- atomic_inc(&sk->sk_drops);
76047+ atomic_inc_unchecked(&sk->sk_drops);
76048 kfree_skb(skb);
76049 return NET_RX_DROP;
76050 }
76051@@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
76052
76053 if (inet->hdrincl) {
76054 if (skb_checksum_complete(skb)) {
76055- atomic_inc(&sk->sk_drops);
76056+ atomic_inc_unchecked(&sk->sk_drops);
76057 kfree_skb(skb);
76058 return NET_RX_DROP;
76059 }
76060@@ -602,7 +602,7 @@ out:
76061 return err;
76062 }
76063
76064-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
76065+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
76066 struct flowi6 *fl6, struct dst_entry **dstp,
76067 unsigned int flags)
76068 {
76069@@ -912,12 +912,15 @@ do_confirm:
76070 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
76071 char __user *optval, int optlen)
76072 {
76073+ struct icmp6_filter filter;
76074+
76075 switch (optname) {
76076 case ICMPV6_FILTER:
76077 if (optlen > sizeof(struct icmp6_filter))
76078 optlen = sizeof(struct icmp6_filter);
76079- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
76080+ if (copy_from_user(&filter, optval, optlen))
76081 return -EFAULT;
76082+ raw6_sk(sk)->filter = filter;
76083 return 0;
76084 default:
76085 return -ENOPROTOOPT;
76086@@ -930,6 +933,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76087 char __user *optval, int __user *optlen)
76088 {
76089 int len;
76090+ struct icmp6_filter filter;
76091
76092 switch (optname) {
76093 case ICMPV6_FILTER:
76094@@ -941,7 +945,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76095 len = sizeof(struct icmp6_filter);
76096 if (put_user(len, optlen))
76097 return -EFAULT;
76098- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
76099+ filter = raw6_sk(sk)->filter;
76100+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
76101 return -EFAULT;
76102 return 0;
76103 default:
76104@@ -1248,7 +1253,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
76105 0, 0L, 0,
76106 sock_i_uid(sp), 0,
76107 sock_i_ino(sp),
76108- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
76109+ atomic_read(&sp->sk_refcnt),
76110+#ifdef CONFIG_GRKERNSEC_HIDESYM
76111+ NULL,
76112+#else
76113+ sp,
76114+#endif
76115+ atomic_read_unchecked(&sp->sk_drops));
76116 }
76117
76118 static int raw6_seq_show(struct seq_file *seq, void *v)
76119diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
76120index 3edd05a..63aad01 100644
76121--- a/net/ipv6/tcp_ipv6.c
76122+++ b/net/ipv6/tcp_ipv6.c
76123@@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
76124 }
76125 #endif
76126
76127+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76128+extern int grsec_enable_blackhole;
76129+#endif
76130+
76131 static void tcp_v6_hash(struct sock *sk)
76132 {
76133 if (sk->sk_state != TCP_CLOSE) {
76134@@ -1650,6 +1654,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
76135 return 0;
76136
76137 reset:
76138+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76139+ if (!grsec_enable_blackhole)
76140+#endif
76141 tcp_v6_send_reset(sk, skb);
76142 discard:
76143 if (opt_skb)
76144@@ -1729,12 +1736,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
76145 TCP_SKB_CB(skb)->sacked = 0;
76146
76147 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76148- if (!sk)
76149+ if (!sk) {
76150+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76151+ ret = 1;
76152+#endif
76153 goto no_tcp_socket;
76154+ }
76155
76156 process:
76157- if (sk->sk_state == TCP_TIME_WAIT)
76158+ if (sk->sk_state == TCP_TIME_WAIT) {
76159+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76160+ ret = 2;
76161+#endif
76162 goto do_time_wait;
76163+ }
76164
76165 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
76166 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
76167@@ -1782,6 +1797,10 @@ no_tcp_socket:
76168 bad_packet:
76169 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76170 } else {
76171+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76172+ if (!grsec_enable_blackhole || (ret == 1 &&
76173+ (skb->dev->flags & IFF_LOOPBACK)))
76174+#endif
76175 tcp_v6_send_reset(NULL, skb);
76176 }
76177
76178@@ -2043,7 +2062,13 @@ static void get_openreq6(struct seq_file *seq,
76179 uid,
76180 0, /* non standard timer */
76181 0, /* open_requests have no inode */
76182- 0, req);
76183+ 0,
76184+#ifdef CONFIG_GRKERNSEC_HIDESYM
76185+ NULL
76186+#else
76187+ req
76188+#endif
76189+ );
76190 }
76191
76192 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76193@@ -2093,7 +2118,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76194 sock_i_uid(sp),
76195 icsk->icsk_probes_out,
76196 sock_i_ino(sp),
76197- atomic_read(&sp->sk_refcnt), sp,
76198+ atomic_read(&sp->sk_refcnt),
76199+#ifdef CONFIG_GRKERNSEC_HIDESYM
76200+ NULL,
76201+#else
76202+ sp,
76203+#endif
76204 jiffies_to_clock_t(icsk->icsk_rto),
76205 jiffies_to_clock_t(icsk->icsk_ack.ato),
76206 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
76207@@ -2128,7 +2158,13 @@ static void get_timewait6_sock(struct seq_file *seq,
76208 dest->s6_addr32[2], dest->s6_addr32[3], destp,
76209 tw->tw_substate, 0, 0,
76210 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76211- atomic_read(&tw->tw_refcnt), tw);
76212+ atomic_read(&tw->tw_refcnt),
76213+#ifdef CONFIG_GRKERNSEC_HIDESYM
76214+ NULL
76215+#else
76216+ tw
76217+#endif
76218+ );
76219 }
76220
76221 static int tcp6_seq_show(struct seq_file *seq, void *v)
76222diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
76223index 4f96b5c..75543d7 100644
76224--- a/net/ipv6/udp.c
76225+++ b/net/ipv6/udp.c
76226@@ -50,6 +50,10 @@
76227 #include <linux/seq_file.h>
76228 #include "udp_impl.h"
76229
76230+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76231+extern int grsec_enable_blackhole;
76232+#endif
76233+
76234 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
76235 {
76236 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
76237@@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
76238
76239 return 0;
76240 drop:
76241- atomic_inc(&sk->sk_drops);
76242+ atomic_inc_unchecked(&sk->sk_drops);
76243 drop_no_sk_drops_inc:
76244 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76245 kfree_skb(skb);
76246@@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
76247 continue;
76248 }
76249 drop:
76250- atomic_inc(&sk->sk_drops);
76251+ atomic_inc_unchecked(&sk->sk_drops);
76252 UDP6_INC_STATS_BH(sock_net(sk),
76253 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
76254 UDP6_INC_STATS_BH(sock_net(sk),
76255@@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76256 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
76257 proto == IPPROTO_UDPLITE);
76258
76259+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76260+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76261+#endif
76262 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
76263
76264 kfree_skb(skb);
76265@@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76266 if (!sock_owned_by_user(sk))
76267 udpv6_queue_rcv_skb(sk, skb);
76268 else if (sk_add_backlog(sk, skb)) {
76269- atomic_inc(&sk->sk_drops);
76270+ atomic_inc_unchecked(&sk->sk_drops);
76271 bh_unlock_sock(sk);
76272 sock_put(sk);
76273 goto discard;
76274@@ -1410,8 +1417,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
76275 0, 0L, 0,
76276 sock_i_uid(sp), 0,
76277 sock_i_ino(sp),
76278- atomic_read(&sp->sk_refcnt), sp,
76279- atomic_read(&sp->sk_drops));
76280+ atomic_read(&sp->sk_refcnt),
76281+#ifdef CONFIG_GRKERNSEC_HIDESYM
76282+ NULL,
76283+#else
76284+ sp,
76285+#endif
76286+ atomic_read_unchecked(&sp->sk_drops));
76287 }
76288
76289 int udp6_seq_show(struct seq_file *seq, void *v)
76290diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
76291index 253695d..9481ce8 100644
76292--- a/net/irda/ircomm/ircomm_tty.c
76293+++ b/net/irda/ircomm/ircomm_tty.c
76294@@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76295 add_wait_queue(&self->open_wait, &wait);
76296
76297 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
76298- __FILE__,__LINE__, tty->driver->name, self->open_count );
76299+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76300
76301 /* As far as I can see, we protect open_count - Jean II */
76302 spin_lock_irqsave(&self->spinlock, flags);
76303 if (!tty_hung_up_p(filp)) {
76304 extra_count = 1;
76305- self->open_count--;
76306+ local_dec(&self->open_count);
76307 }
76308 spin_unlock_irqrestore(&self->spinlock, flags);
76309- self->blocked_open++;
76310+ local_inc(&self->blocked_open);
76311
76312 while (1) {
76313 if (tty->termios->c_cflag & CBAUD) {
76314@@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76315 }
76316
76317 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
76318- __FILE__,__LINE__, tty->driver->name, self->open_count );
76319+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76320
76321 schedule();
76322 }
76323@@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76324 if (extra_count) {
76325 /* ++ is not atomic, so this should be protected - Jean II */
76326 spin_lock_irqsave(&self->spinlock, flags);
76327- self->open_count++;
76328+ local_inc(&self->open_count);
76329 spin_unlock_irqrestore(&self->spinlock, flags);
76330 }
76331- self->blocked_open--;
76332+ local_dec(&self->blocked_open);
76333
76334 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
76335- __FILE__,__LINE__, tty->driver->name, self->open_count);
76336+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
76337
76338 if (!retval)
76339 self->flags |= ASYNC_NORMAL_ACTIVE;
76340@@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
76341 }
76342 /* ++ is not atomic, so this should be protected - Jean II */
76343 spin_lock_irqsave(&self->spinlock, flags);
76344- self->open_count++;
76345+ local_inc(&self->open_count);
76346
76347 tty->driver_data = self;
76348 self->tty = tty;
76349 spin_unlock_irqrestore(&self->spinlock, flags);
76350
76351 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
76352- self->line, self->open_count);
76353+ self->line, local_read(&self->open_count));
76354
76355 /* Not really used by us, but lets do it anyway */
76356 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
76357@@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76358 return;
76359 }
76360
76361- if ((tty->count == 1) && (self->open_count != 1)) {
76362+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
76363 /*
76364 * Uh, oh. tty->count is 1, which means that the tty
76365 * structure will be freed. state->count should always
76366@@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76367 */
76368 IRDA_DEBUG(0, "%s(), bad serial port count; "
76369 "tty->count is 1, state->count is %d\n", __func__ ,
76370- self->open_count);
76371- self->open_count = 1;
76372+ local_read(&self->open_count));
76373+ local_set(&self->open_count, 1);
76374 }
76375
76376- if (--self->open_count < 0) {
76377+ if (local_dec_return(&self->open_count) < 0) {
76378 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
76379- __func__, self->line, self->open_count);
76380- self->open_count = 0;
76381+ __func__, self->line, local_read(&self->open_count));
76382+ local_set(&self->open_count, 0);
76383 }
76384- if (self->open_count) {
76385+ if (local_read(&self->open_count)) {
76386 spin_unlock_irqrestore(&self->spinlock, flags);
76387
76388 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
76389@@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76390 tty->closing = 0;
76391 self->tty = NULL;
76392
76393- if (self->blocked_open) {
76394+ if (local_read(&self->blocked_open)) {
76395 if (self->close_delay)
76396 schedule_timeout_interruptible(self->close_delay);
76397 wake_up_interruptible(&self->open_wait);
76398@@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
76399 spin_lock_irqsave(&self->spinlock, flags);
76400 self->flags &= ~ASYNC_NORMAL_ACTIVE;
76401 self->tty = NULL;
76402- self->open_count = 0;
76403+ local_set(&self->open_count, 0);
76404 spin_unlock_irqrestore(&self->spinlock, flags);
76405
76406 wake_up_interruptible(&self->open_wait);
76407@@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
76408 seq_putc(m, '\n');
76409
76410 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
76411- seq_printf(m, "Open count: %d\n", self->open_count);
76412+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
76413 seq_printf(m, "Max data size: %d\n", self->max_data_size);
76414 seq_printf(m, "Max header size: %d\n", self->max_header_size);
76415
76416diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
76417index d5c5b8f..33beff0 100644
76418--- a/net/iucv/af_iucv.c
76419+++ b/net/iucv/af_iucv.c
76420@@ -764,10 +764,10 @@ static int iucv_sock_autobind(struct sock *sk)
76421
76422 write_lock_bh(&iucv_sk_list.lock);
76423
76424- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
76425+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76426 while (__iucv_get_sock_by_name(name)) {
76427 sprintf(name, "%08x",
76428- atomic_inc_return(&iucv_sk_list.autobind_name));
76429+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76430 }
76431
76432 write_unlock_bh(&iucv_sk_list.lock);
76433diff --git a/net/key/af_key.c b/net/key/af_key.c
76434index 11dbb22..c20f667 100644
76435--- a/net/key/af_key.c
76436+++ b/net/key/af_key.c
76437@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
76438 static u32 get_acqseq(void)
76439 {
76440 u32 res;
76441- static atomic_t acqseq;
76442+ static atomic_unchecked_t acqseq;
76443
76444 do {
76445- res = atomic_inc_return(&acqseq);
76446+ res = atomic_inc_return_unchecked(&acqseq);
76447 } while (!res);
76448 return res;
76449 }
76450diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
76451index 2f0642d..e5c6fba 100644
76452--- a/net/mac80211/ieee80211_i.h
76453+++ b/net/mac80211/ieee80211_i.h
76454@@ -28,6 +28,7 @@
76455 #include <net/ieee80211_radiotap.h>
76456 #include <net/cfg80211.h>
76457 #include <net/mac80211.h>
76458+#include <asm/local.h>
76459 #include "key.h"
76460 #include "sta_info.h"
76461
76462@@ -781,7 +782,7 @@ struct ieee80211_local {
76463 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
76464 spinlock_t queue_stop_reason_lock;
76465
76466- int open_count;
76467+ local_t open_count;
76468 int monitors, cooked_mntrs;
76469 /* number of interfaces with corresponding FIF_ flags */
76470 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
76471diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
76472index 8e2137b..2974283 100644
76473--- a/net/mac80211/iface.c
76474+++ b/net/mac80211/iface.c
76475@@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76476 break;
76477 }
76478
76479- if (local->open_count == 0) {
76480+ if (local_read(&local->open_count) == 0) {
76481 res = drv_start(local);
76482 if (res)
76483 goto err_del_bss;
76484@@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76485 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
76486
76487 if (!is_valid_ether_addr(dev->dev_addr)) {
76488- if (!local->open_count)
76489+ if (!local_read(&local->open_count))
76490 drv_stop(local);
76491 return -EADDRNOTAVAIL;
76492 }
76493@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76494 mutex_unlock(&local->mtx);
76495
76496 if (coming_up)
76497- local->open_count++;
76498+ local_inc(&local->open_count);
76499
76500 if (hw_reconf_flags)
76501 ieee80211_hw_config(local, hw_reconf_flags);
76502@@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76503 err_del_interface:
76504 drv_remove_interface(local, sdata);
76505 err_stop:
76506- if (!local->open_count)
76507+ if (!local_read(&local->open_count))
76508 drv_stop(local);
76509 err_del_bss:
76510 sdata->bss = NULL;
76511@@ -489,7 +489,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76512 }
76513
76514 if (going_down)
76515- local->open_count--;
76516+ local_dec(&local->open_count);
76517
76518 switch (sdata->vif.type) {
76519 case NL80211_IFTYPE_AP_VLAN:
76520@@ -548,7 +548,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76521
76522 ieee80211_recalc_ps(local, -1);
76523
76524- if (local->open_count == 0) {
76525+ if (local_read(&local->open_count) == 0) {
76526 if (local->ops->napi_poll)
76527 napi_disable(&local->napi);
76528 ieee80211_clear_tx_pending(local);
76529diff --git a/net/mac80211/main.c b/net/mac80211/main.c
76530index b142bd4..a651749 100644
76531--- a/net/mac80211/main.c
76532+++ b/net/mac80211/main.c
76533@@ -166,7 +166,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
76534 local->hw.conf.power_level = power;
76535 }
76536
76537- if (changed && local->open_count) {
76538+ if (changed && local_read(&local->open_count)) {
76539 ret = drv_config(local, changed);
76540 /*
76541 * Goal:
76542diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
76543index 596efaf..8f1911f 100644
76544--- a/net/mac80211/pm.c
76545+++ b/net/mac80211/pm.c
76546@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76547 struct ieee80211_sub_if_data *sdata;
76548 struct sta_info *sta;
76549
76550- if (!local->open_count)
76551+ if (!local_read(&local->open_count))
76552 goto suspend;
76553
76554 ieee80211_scan_cancel(local);
76555@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76556 cancel_work_sync(&local->dynamic_ps_enable_work);
76557 del_timer_sync(&local->dynamic_ps_timer);
76558
76559- local->wowlan = wowlan && local->open_count;
76560+ local->wowlan = wowlan && local_read(&local->open_count);
76561 if (local->wowlan) {
76562 int err = drv_suspend(local, wowlan);
76563 if (err < 0) {
76564@@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76565 }
76566
76567 /* stop hardware - this must stop RX */
76568- if (local->open_count)
76569+ if (local_read(&local->open_count))
76570 ieee80211_stop_device(local);
76571
76572 suspend:
76573diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
76574index f9b8e81..bb89b46 100644
76575--- a/net/mac80211/rate.c
76576+++ b/net/mac80211/rate.c
76577@@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
76578
76579 ASSERT_RTNL();
76580
76581- if (local->open_count)
76582+ if (local_read(&local->open_count))
76583 return -EBUSY;
76584
76585 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
76586diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
76587index c97a065..ff61928 100644
76588--- a/net/mac80211/rc80211_pid_debugfs.c
76589+++ b/net/mac80211/rc80211_pid_debugfs.c
76590@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
76591
76592 spin_unlock_irqrestore(&events->lock, status);
76593
76594- if (copy_to_user(buf, pb, p))
76595+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
76596 return -EFAULT;
76597
76598 return p;
76599diff --git a/net/mac80211/util.c b/net/mac80211/util.c
76600index 9919892..8c49803 100644
76601--- a/net/mac80211/util.c
76602+++ b/net/mac80211/util.c
76603@@ -1143,7 +1143,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
76604 }
76605 #endif
76606 /* everything else happens only if HW was up & running */
76607- if (!local->open_count)
76608+ if (!local_read(&local->open_count))
76609 goto wake_up;
76610
76611 /*
76612diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
76613index f8ac4ef..b02560b 100644
76614--- a/net/netfilter/Kconfig
76615+++ b/net/netfilter/Kconfig
76616@@ -806,6 +806,16 @@ config NETFILTER_XT_MATCH_ESP
76617
76618 To compile it as a module, choose M here. If unsure, say N.
76619
76620+config NETFILTER_XT_MATCH_GRADM
76621+ tristate '"gradm" match support'
76622+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
76623+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
76624+ ---help---
76625+ The gradm match allows to match on grsecurity RBAC being enabled.
76626+ It is useful when iptables rules are applied early on bootup to
76627+ prevent connections to the machine (except from a trusted host)
76628+ while the RBAC system is disabled.
76629+
76630 config NETFILTER_XT_MATCH_HASHLIMIT
76631 tristate '"hashlimit" match support'
76632 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
76633diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
76634index 40f4c3d..0d5dd6b 100644
76635--- a/net/netfilter/Makefile
76636+++ b/net/netfilter/Makefile
76637@@ -83,6 +83,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
76638 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
76639 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
76640 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
76641+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
76642 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
76643 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
76644 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
76645diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
76646index 29fa5ba..8debc79 100644
76647--- a/net/netfilter/ipvs/ip_vs_conn.c
76648+++ b/net/netfilter/ipvs/ip_vs_conn.c
76649@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
76650 /* Increase the refcnt counter of the dest */
76651 atomic_inc(&dest->refcnt);
76652
76653- conn_flags = atomic_read(&dest->conn_flags);
76654+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
76655 if (cp->protocol != IPPROTO_UDP)
76656 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
76657 /* Bind with the destination and its corresponding transmitter */
76658@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
76659 atomic_set(&cp->refcnt, 1);
76660
76661 atomic_set(&cp->n_control, 0);
76662- atomic_set(&cp->in_pkts, 0);
76663+ atomic_set_unchecked(&cp->in_pkts, 0);
76664
76665 atomic_inc(&ipvs->conn_count);
76666 if (flags & IP_VS_CONN_F_NO_CPORT)
76667@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
76668
76669 /* Don't drop the entry if its number of incoming packets is not
76670 located in [0, 8] */
76671- i = atomic_read(&cp->in_pkts);
76672+ i = atomic_read_unchecked(&cp->in_pkts);
76673 if (i > 8 || i < 0) return 0;
76674
76675 if (!todrop_rate[i]) return 0;
76676diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
76677index 2555816..31492d9 100644
76678--- a/net/netfilter/ipvs/ip_vs_core.c
76679+++ b/net/netfilter/ipvs/ip_vs_core.c
76680@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
76681 ret = cp->packet_xmit(skb, cp, pd->pp);
76682 /* do not touch skb anymore */
76683
76684- atomic_inc(&cp->in_pkts);
76685+ atomic_inc_unchecked(&cp->in_pkts);
76686 ip_vs_conn_put(cp);
76687 return ret;
76688 }
76689@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
76690 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
76691 pkts = sysctl_sync_threshold(ipvs);
76692 else
76693- pkts = atomic_add_return(1, &cp->in_pkts);
76694+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76695
76696 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
76697 cp->protocol == IPPROTO_SCTP) {
76698diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
76699index b3afe18..08ec940 100644
76700--- a/net/netfilter/ipvs/ip_vs_ctl.c
76701+++ b/net/netfilter/ipvs/ip_vs_ctl.c
76702@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
76703 ip_vs_rs_hash(ipvs, dest);
76704 write_unlock_bh(&ipvs->rs_lock);
76705 }
76706- atomic_set(&dest->conn_flags, conn_flags);
76707+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
76708
76709 /* bind the service */
76710 if (!dest->svc) {
76711@@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76712 " %-7s %-6d %-10d %-10d\n",
76713 &dest->addr.in6,
76714 ntohs(dest->port),
76715- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76716+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76717 atomic_read(&dest->weight),
76718 atomic_read(&dest->activeconns),
76719 atomic_read(&dest->inactconns));
76720@@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76721 "%-7s %-6d %-10d %-10d\n",
76722 ntohl(dest->addr.ip),
76723 ntohs(dest->port),
76724- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76725+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76726 atomic_read(&dest->weight),
76727 atomic_read(&dest->activeconns),
76728 atomic_read(&dest->inactconns));
76729@@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
76730
76731 entry.addr = dest->addr.ip;
76732 entry.port = dest->port;
76733- entry.conn_flags = atomic_read(&dest->conn_flags);
76734+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
76735 entry.weight = atomic_read(&dest->weight);
76736 entry.u_threshold = dest->u_threshold;
76737 entry.l_threshold = dest->l_threshold;
76738@@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
76739 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
76740
76741 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
76742- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76743+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76744 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
76745 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
76746 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
76747diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
76748index 8a0d6d6..90ec197 100644
76749--- a/net/netfilter/ipvs/ip_vs_sync.c
76750+++ b/net/netfilter/ipvs/ip_vs_sync.c
76751@@ -649,7 +649,7 @@ control:
76752 * i.e only increment in_pkts for Templates.
76753 */
76754 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
76755- int pkts = atomic_add_return(1, &cp->in_pkts);
76756+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76757
76758 if (pkts % sysctl_sync_period(ipvs) != 1)
76759 return;
76760@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
76761
76762 if (opt)
76763 memcpy(&cp->in_seq, opt, sizeof(*opt));
76764- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76765+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76766 cp->state = state;
76767 cp->old_state = cp->state;
76768 /*
76769diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
76770index 7fd66de..e6fb361 100644
76771--- a/net/netfilter/ipvs/ip_vs_xmit.c
76772+++ b/net/netfilter/ipvs/ip_vs_xmit.c
76773@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
76774 else
76775 rc = NF_ACCEPT;
76776 /* do not touch skb anymore */
76777- atomic_inc(&cp->in_pkts);
76778+ atomic_inc_unchecked(&cp->in_pkts);
76779 goto out;
76780 }
76781
76782@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
76783 else
76784 rc = NF_ACCEPT;
76785 /* do not touch skb anymore */
76786- atomic_inc(&cp->in_pkts);
76787+ atomic_inc_unchecked(&cp->in_pkts);
76788 goto out;
76789 }
76790
76791diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
76792index 66b2c54..c7884e3 100644
76793--- a/net/netfilter/nfnetlink_log.c
76794+++ b/net/netfilter/nfnetlink_log.c
76795@@ -70,7 +70,7 @@ struct nfulnl_instance {
76796 };
76797
76798 static DEFINE_SPINLOCK(instances_lock);
76799-static atomic_t global_seq;
76800+static atomic_unchecked_t global_seq;
76801
76802 #define INSTANCE_BUCKETS 16
76803 static struct hlist_head instance_table[INSTANCE_BUCKETS];
76804@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
76805 /* global sequence number */
76806 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
76807 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
76808- htonl(atomic_inc_return(&global_seq)));
76809+ htonl(atomic_inc_return_unchecked(&global_seq)));
76810
76811 if (data_len) {
76812 struct nlattr *nla;
76813diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
76814new file mode 100644
76815index 0000000..6905327
76816--- /dev/null
76817+++ b/net/netfilter/xt_gradm.c
76818@@ -0,0 +1,51 @@
76819+/*
76820+ * gradm match for netfilter
76821